Dataset schema (each row below lists these fields in order: patch, y, oldf, idx, id, msg, proj, lang):

- patch: string, 17 to 31.2k characters (the diff hunk under review)
- y: int64, constant 1
- oldf: string, 0 to 2.21M characters (the file contents before the patch)
- idx: int64, constant 1
- id: int64, 4.29k to 68.4k
- msg: string, 8 to 843 characters (the review comment)
- proj: string, 212 distinct values (the project)
- lang: string, 9 distinct values (the language)
@@ -5,9 +5,9 @@ import sys
 # Simple python version test
 major,minor = sys.version_info[:2]
 py_version = sys.version.split()[0]
-if major != 2 or minor < 5:
+if major != 2 or minor < 6:
   # SystemExit defaults to returning 1 when printing a string to stderr
-  raise SystemExit("You are using python %s, but version 2.5 or greater is required" % py_version)
+  raise SystemExit("You are using python %s, but version 2.7 or greater is required" % py_version)
 required = 0
 optional = 0
1
#!/usr/bin/env python import sys # Simple python version test major,minor = sys.version_info[:2] py_version = sys.version.split()[0] if major != 2 or minor < 5: # SystemExit defaults to returning 1 when printing a string to stderr raise SystemExit("You are using python %s, but version 2.5 or greater is required" % py_version) required = 0 optional = 0 # Test for whisper try: import whisper except ImportError: # No? test for ceres try: import ceres # We imported ceres, but not whisper so it's an optional dependency sys.stderr.write("[OPTIONAL] Unable to import the 'whisper' module. Without it the webapp will be unable to read .wsp files\n") optional += 1 except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'whisper' or 'ceres' modules, please download this package from the Graphite project page and install it.\n") required += 1 # Test for pycairo or cairocffi try: import cairo except ImportError: try: import cairocffi as cairo except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'cairo' module, do you have pycairo installed for python %s?\n" % py_version) cairo = None required += 1 # Test that pycairo has the PNG backend try: if cairo: surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10) del surface except Exception: sys.stderr.write("[REQUIRED] Failed to create an ImageSurface with cairo, you probably need to recompile cairo with PNG support\n") required += 1 # Test that cairo can find fonts try: if cairo: surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10) context = cairo.Context(surface) context.font_extents() del surface, context except Exception: sys.stderr.write("[REQUIRED] Failed to create text with cairo, this probably means cairo cant find any fonts. Install some system fonts and try again\n") # Test for django try: import django except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'django' module, do you have Django installed for python %s?\n" % py_version) django = None required += 1 # Test for pytz try: import pytz except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'pytz' module, do you have pytz module installed for python %s?\n" % py_version) required += 1 # Test for pyparsing try: import pyparsing except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'pyparsing' module, do you have pyparsing module installed for python %s?\n" % py_version) required += 1 # Test for django-tagging try: import tagging except ImportError: sys.stderr.write("[REQUIRED] Unable to import the 'tagging' module, do you have django-tagging installed for python %s?\n" % py_version) required += 1 if django and django.VERSION[:2] < (1,4): sys.stderr.write("[REQUIRED] You have django version %s installed, but version 1.4 or greater is required\n" % django.get_version()) required += 1 # Test for a json module try: import json except ImportError: try: import simplejson except ImportError: sys.stderr.write("[REQUIRED] Unable to import either the 'json' or 'simplejson' module, at least one is required.\n") required += 1 # Test for zope.interface try: from zope.interface import Interface except ImportError: sys.stderr.write("[OPTIONAL] Unable to import Interface from zope.interface. Without it, you will be unable to run carbon on this server.\n") optional +=1 # Test for python-memcached try: import memcache except ImportError: sys.stderr.write("[OPTIONAL] Unable to import the 'memcache' module, do you have python-memcached installed for python %s? 
This feature is not required but greatly improves performance.\n" % py_version) optional += 1 # Test for python-ldap try: import ldap except ImportError: sys.stderr.write("[OPTIONAL] Unable to import the 'ldap' module, do you have python-ldap installed for python %s? Without python-ldap, you will not be able to use LDAP authentication in the graphite webapp.\n" % py_version) optional += 1 # Test for Twisted python try: import twisted except ImportError: sys.stderr.write("[OPTIONAL] Unable to import the 'twisted' package, do you have Twisted installed for python %s? Without Twisted, you cannot run carbon on this server.\n" % py_version) optional += 1 else: tv = [] tv = twisted.__version__.split('.') if int(tv[0]) < 8 or (int(tv[0]) == 8 and int(tv[1]) < 2): print "[OPTIONAL] Your version of Twisted is too old to run carbon. You will not be able to run carbon on this server until you upgrade Twisted >= 8.2.\n" optional += 1 # Test for txamqp try: import txamqp except ImportError: sys.stderr.write("[OPTIONAL] Unable to import the 'txamqp' module, this is required if you want to use AMQP as an input to Carbon. Note that txamqp requires python 2.5 or greater.\n") optional += 1 # Test for python-rrdtool try: import rrdtool except ImportError: sys.stderr.write("[OPTIONAL] Unable to import the 'python-rrdtool' module, this is required for reading RRD.\n") optional += 1 if optional: sys.stderr.write("%d optional dependencies not met. Please consider the optional items before proceeding.\n" % optional) else: print "All optional dependencies are met." if required: sys.stderr.write("%d necessary dependencies not met. Graphite will not function until these dependencies are fulfilled.\n" % required) sys.exit(1) else: print "All necessary dependencies are met."
1
9,054
Is this meant to be "2.7" or "2.6"? The two lines in this commit don't agree with one another. Everything else in this PR says "2.6", and that seems sensible.
graphite-project-graphite-web
py
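To illustrate the reviewer's point: deriving both the comparison and the error message from a single constant prevents the two from drifting apart. A minimal Python sketch (the (2, 6) floor follows the rest of the PR; the constant name is illustrative, and the Python-2-only check mirrors the original script):

import sys

# Single source of truth for the required version floor, so the test
# and the message printed to the user cannot disagree.
MIN_VERSION = (2, 6)

py_version = sys.version.split()[0]
if sys.version_info[0] != 2 or sys.version_info[:2] < MIN_VERSION:
    raise SystemExit("You are using python %s, but version %d.%d or greater is required"
                     % (py_version, MIN_VERSION[0], MIN_VERSION[1]))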
@@ -21,8 +21,8 @@
 # ----------------------------------------------------------------------
 
 import unittest
 
-from nupic.data.pattern_machine import (PatternMachine,
-                                        ConsecutivePatternMachine)
+from nupic.data.generators.pattern_machine import (PatternMachine,
+                                                   ConsecutivePatternMachine)
1
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import unittest from nupic.data.pattern_machine import (PatternMachine, ConsecutivePatternMachine) class PatternMachineTest(unittest.TestCase): def setUp(self): self.patternMachine = PatternMachine(10000, 5, num=50) def testGet(self): patternA = self.patternMachine.get(48) self.assertEqual(len(patternA), 5) patternB = self.patternMachine.get(49) self.assertEqual(len(patternB), 5) self.assertEqual(patternA & patternB, set()) def testGetOutOfBounds(self): args = [50] self.assertRaises(IndexError, self.patternMachine.get, *args) def testAddNoise(self): patternMachine = PatternMachine(10000, 1000, num=1) pattern = patternMachine.get(0) noisy = patternMachine.addNoise(pattern, 0.0) self.assertEqual(len(pattern & noisy), 1000) noisy = patternMachine.addNoise(pattern, 0.5) self.assertTrue(400 < len(pattern & noisy) < 600) noisy = patternMachine.addNoise(pattern, 1.0) self.assertTrue(50 < len(pattern & noisy) < 150) def testNumbersForBit(self): pattern = self.patternMachine.get(49) for bit in pattern: self.assertEqual(self.patternMachine.numbersForBit(bit), set([49])) def testNumbersForBitOutOfBounds(self): args = [10000] self.assertRaises(IndexError, self.patternMachine.numbersForBit, *args) def testNumberMapForBits(self): pattern = self.patternMachine.get(49) numberMap = self.patternMachine.numberMapForBits(pattern) self.assertEqual(numberMap.keys(), [49]) self.assertEqual(numberMap[49], pattern) def testWList(self): w = [4, 7, 11] patternMachine = PatternMachine(100, w, num=50) widths = dict((el, 0) for el in w) for i in range(50): pattern = patternMachine.get(i) width = len(pattern) self.assertTrue(width in w) widths[len(pattern)] += 1 for i in w: self.assertTrue(widths[i] > 0) class ConsecutivePatternMachineTest(unittest.TestCase): def setUp(self): self.patternMachine = ConsecutivePatternMachine(100, 5) def testGet(self): pattern = self.patternMachine.get(18) self.assertEqual(len(pattern), 5) self.assertEqual(pattern, set([90, 91, 92, 93, 94])) pattern = self.patternMachine.get(19) self.assertEqual(len(pattern), 5) self.assertEqual(pattern, set([95, 96, 97, 98, 99])) def testGetOutOfBounds(self): args = [20] self.assertRaises(IndexError, self.patternMachine.get, *args) if __name__ == '__main__': unittest.main()
1
17,488
Should we put these tests in a `generators` directory?
numenta-nupic
py
@@ -356,8 +356,8 @@ struct greater_equal_op {
 struct and_op {
   inline DataType operator()(const DataType& x1,
                              const DataType& x2) const {
-    const bool b1 = x1 != zero && !std::isnan(x1);
-    const bool b2 = x2 != zero && !std::isnan(x2);
+    const auto& b1 = x1 >= DataType(0.5);
+    const auto& b2 = x2 >= DataType(0.5);
     return (b1 && b2) ? one : zero;
   }
   inline void operator()(const DataType& x1,
1
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/layers/math/binary.hpp" #include "lbann/utils/entrywise_operator.hpp" namespace lbann { namespace { // Helpful constants constexpr DataType zero = 0; constexpr DataType one = 1; /** Apply a binary backprop operator to CPU data. * The input and output data must be on CPU and must have the same * dimensions. Given a binary function \f$ y = f(x_1,x_2) \f$, the * corresponding BinaryBackPropOperator is a 5-ary function with the * arguments \f$ x_1 \f$, \f$ x_2 \f$, \f$ dL/dy \f$, \f$ dL/dx_1\f$, * \f$ dL/dx_2 \f$. The last two arguments should be overwritten when * the BinaryBackPropOperator is called. */ template <typename BinaryBackPropOperator> void apply_binary_backprop_operator(const AbsMat& x1, const AbsMat& x2, const AbsMat& dy, AbsMat& dx1, AbsMat& dx2) { if (x1.Contiguous() && x2.Contiguous() && dy.Contiguous() && dx1.Contiguous() && dx2.Contiguous()) { const auto* x1_buffer = x1.LockedBuffer(); const auto* x2_buffer = x2.LockedBuffer(); const auto* dy_buffer = dy.LockedBuffer(); auto* dx1_buffer = dx1.Buffer(); auto* dx2_buffer = dx2.Buffer(); const size_t size = x1.Height() * x1.Width(); #pragma omp parallel for for (size_t i = 0; i < size; ++i) { BinaryBackPropOperator op; op(x1_buffer[i], x2_buffer[i], dy_buffer[i], dx1_buffer[i], dx2_buffer[i]); } } else { #pragma omp parallel for collapse(2) for (El::Int col = 0; col < x1.Width(); ++col) { for (El::Int row = 0; row < x2.Height(); ++row) { BinaryBackPropOperator op; op(x1(row, col), x2(row, col), dy(row, col), dx1(row, col), dx2(row, col)); } } } } // ========================================================= // Operator objects for entry-wise binary layers // ========================================================= // Note: Binary operator corresponds to forward prop step // (\f$ y = f(x_1,x_2) \f$) and 5-ary operator corresponds // to back prop step // (\f$ \frac{dL}{dx_i} = \frac{dL}{dy} \frac{df}{dx_i}(x_1,x_2) \f$). /** Add operator. */ struct add_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 + x2; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy; dx2 = dy; } }; /** Subtract operator. 
*/ struct subtract_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 - x2; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy; dx2 = -dy; } }; /** Multiply operator. */ struct multiply_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 * x2; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy * x2; dx2 = dy * x1; } }; /** Divide operator. */ struct divide_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 / x2; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy / x2; dx2 = -dy * x1 / (x2*x2); } }; /** Modulo operator. */ struct mod_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return std::fmod(x1, x2); } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy; dx2 = -dy * std::floor(x1 / x2); } }; /** Power operator. */ struct pow_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return std::pow(x1, x2); } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = dy * x2 * std::pow(x1, x2 - one); dx2 = dy * std::log(x1) * std::pow(x1, x2); } }; /** Safe divide operator. * If a standard division produces an infinity or NaN, zero is output * instead. */ struct safe_divide_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { const auto& y = x1 / x2; if (std::isfinite(y)) { return y; } else { return zero; } } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { const auto& y = x1 / x2; if (std::isfinite(y)) { dx1 = dy / x2; dx2 = -dy * x1 / (x2*x2); } else { dx1 = zero; dx2 = zero; } } }; /** Maximum operator. */ struct max_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return std::max(x1, x2); } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { if (x1 > x2) { dx1 = dy; dx2 = zero; } else if (x2 > x1) { dx1 = zero; dx2 = dy; } else { dx1 = dy / 2; dx2 = dy / 2; } } }; /** Minimum operator. */ struct min_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return std::min(x1, x2); } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { if (x1 < x2) { dx1 = dy; dx2 = zero; } else if (x2 < x1) { dx1 = zero; dx2 = dy; } else { dx1 = dy / 2; dx2 = dy / 2; } } }; /** Equal operator. */ struct equal_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 == x2 ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Not equal operator. */ struct not_equal_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 == x2 ? zero : one; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Less than operator. 
*/ struct less_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 < x2 ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Less than or equal operator. */ struct less_equal_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 <= x2 ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Greater than operator. */ struct greater_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 > x2 ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Greater than or equal operator. */ struct greater_equal_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { return x1 >= x2 ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Logical and operator. */ struct and_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { const bool b1 = x1 != zero && !std::isnan(x1); const bool b2 = x2 != zero && !std::isnan(x2); return (b1 && b2) ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Logical or operator. */ struct or_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { const bool b1 = x1 != zero && !std::isnan(x1); const bool b2 = x2 != zero && !std::isnan(x2); return (b1 || b2) ? one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; /** Logical xor operator. */ struct xor_op { inline DataType operator()(const DataType& x1, const DataType& x2) const { const bool b1 = x1 != zero && !std::isnan(x1); const bool b2 = x2 != zero && !std::isnan(x2); return (b1 || b2) && !(b1 && b2) ? 
one : zero; } inline void operator()(const DataType& x1, const DataType& x2, const DataType& dy, DataType& dx1, DataType& dx2) const { dx1 = zero; dx2 = zero; } }; } // namespace // Template instantiation #define INSTANTIATE(layer, op) \ template <> \ void layer<data_layout::MODEL_PARALLEL, El::Device::CPU> \ ::fp_compute() { \ apply_entrywise_binary_operator<op>(get_prev_activations(0), \ get_prev_activations(1), \ get_activations()); \ } \ template <> \ void layer<data_layout::MODEL_PARALLEL, El::Device::CPU> \ ::bp_compute() { \ apply_binary_backprop_operator<op>(get_local_prev_activations(0), \ get_local_prev_activations(1), \ get_local_prev_error_signals(), \ get_local_error_signals(0), \ get_local_error_signals(1)); \ } \ template <> \ void layer<data_layout::DATA_PARALLEL, El::Device::CPU> \ ::fp_compute() { \ apply_entrywise_binary_operator<op>(get_prev_activations(0), \ get_prev_activations(1), \ get_activations()); \ } \ template <> \ void layer<data_layout::DATA_PARALLEL, El::Device::CPU> \ ::bp_compute() { \ apply_binary_backprop_operator<op>(get_local_prev_activations(0), \ get_local_prev_activations(1), \ get_local_prev_error_signals(), \ get_local_error_signals(0), \ get_local_error_signals(1)); \ } INSTANTIATE(add_layer, add_op) INSTANTIATE(subtract_layer, subtract_op) INSTANTIATE(multiply_layer, multiply_op) INSTANTIATE(divide_layer, divide_op) INSTANTIATE(mod_layer, mod_op) INSTANTIATE(pow_layer, pow_op) INSTANTIATE(safe_divide_layer, safe_divide_op) INSTANTIATE(max_layer, max_op) INSTANTIATE(min_layer, min_op) INSTANTIATE(equal_layer, equal_op) INSTANTIATE(not_equal_layer, not_equal_op) INSTANTIATE(less_layer, less_op) INSTANTIATE(less_equal_layer, less_equal_op) INSTANTIATE(greater_layer, greater_op) INSTANTIATE(greater_equal_layer, greater_equal_op) INSTANTIATE(and_layer, and_op) INSTANTIATE(or_layer, or_op) INSTANTIATE(xor_layer, xor_op) } // namespace lbann
1
13,266
I think it makes more sense to keep these with the standard definition: non-zero is true and zero is false.
LLNL-lbann
cpp
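The reviewer's concern can be shown with a small numeric example (a Python sketch of the two conventions, not the LBANN code): under the 0.5 threshold, small non-zero inputs flip from true to false, and NaN handling becomes implicit.

import math

def and_nonzero(x1, x2):
    # Standard convention: non-zero (and non-NaN) is true, zero is false.
    b1 = x1 != 0.0 and not math.isnan(x1)
    b2 = x2 != 0.0 and not math.isnan(x2)
    return 1.0 if (b1 and b2) else 0.0

def and_threshold(x1, x2):
    # Patched convention: >= 0.5 is true. Note NaN >= 0.5 evaluates to
    # False, so NaN now silently maps to false instead of being checked.
    return 1.0 if (x1 >= 0.5 and x2 >= 0.5) else 0.0

# The two definitions disagree on small non-zero inputs:
assert and_nonzero(0.25, 1.0) == 1.0
assert and_threshold(0.25, 1.0) == 0.0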
@@ -34,9 +34,9 @@ class DummyRequest(mock.MagicMock):
         self.authn_type = 'basicauth'
         self.prefixed_userid = 'basicauth:bob'
         self.effective_principals = [
-            self.prefixed_userid,
             'system.Everyone',
-            'system.Authenticated']
+            'system.Authenticated',
+            'bob']
         self.json = {}
         self.validated = {}
         self.matchdict = {}
1
import os import threading import unittest from collections import defaultdict import mock import webtest from cornice import errors as cornice_errors from enum import Enum from pyramid.url import parse_url_overrides from kinto.core import DEFAULT_SETTINGS from kinto.core import statsd from kinto.core.storage import generators from kinto.core.utils import sqlalchemy, follow_subrequest, encode64 skip_if_travis = unittest.skipIf('TRAVIS' in os.environ, "travis") skip_if_no_postgresql = unittest.skipIf(sqlalchemy is None, "postgresql is not installed.") skip_if_no_statsd = unittest.skipIf(not statsd.statsd_module, "statsd is not installed.") class DummyRequest(mock.MagicMock): """Fully mocked request. """ def __init__(self, *args, **kwargs): super(DummyRequest, self).__init__(*args, **kwargs) self.upath_info = '/v0/' self.registry = mock.MagicMock(settings=DEFAULT_SETTINGS.copy()) self.registry.id_generators = defaultdict(generators.UUID4) self.GET = {} self.headers = {} self.errors = cornice_errors.Errors() self.authenticated_userid = 'bob' self.authn_type = 'basicauth' self.prefixed_userid = 'basicauth:bob' self.effective_principals = [ self.prefixed_userid, 'system.Everyone', 'system.Authenticated'] self.json = {} self.validated = {} self.matchdict = {} self.response = mock.MagicMock(headers={}) def route_url(*a, **kw): # XXX: refactor DummyRequest to take advantage of `pyramid.testing` parts = parse_url_overrides(kw) return ''.join([p for p in parts if p]) self.route_url = route_url follow_subrequest = follow_subrequest def get_request_class(prefix): class PrefixedRequestClass(webtest.app.TestRequest): @classmethod def blank(cls, path, *args, **kwargs): if prefix: path = '/%s%s' % (prefix, path) return webtest.app.TestRequest.blank(path, *args, **kwargs) return PrefixedRequestClass class FormattedErrorMixin(object): """Test mixin in order to perform advanced error responses assertions. """ def assertFormattedError(self, response, code, errno, error, message=None, info=None): if isinstance(errno, Enum): errno = errno.value self.assertIn('application/json', response.headers['Content-Type']) self.assertEqual(response.json['code'], code) self.assertEqual(response.json['errno'], errno) self.assertEqual(response.json['error'], error) if message is not None: self.assertIn(message, response.json['message']) else: # pragma: no cover self.assertNotIn('message', response.json) if info is not None: self.assertIn(info, response.json['info']) else: # pragma: no cover self.assertNotIn('info', response.json) def get_user_headers(user): """Helper to obtain a Basic Auth authorization headers from the specified `user` (e.g. ``"user:pass"``) :rtype: dict """ credentials = "%s:secret" % user authorization = 'Basic {0}'.format(encode64(credentials)) return { 'Authorization': authorization } class BaseWebTest(object): """Base Web Test to test your kinto.core service. It setups the database before each test and delete it after. 
""" api_prefix = "v0" """URL version prefix""" entry_point = None """Main application entry""" def __init__(self, *args, **kwargs): super(BaseWebTest, self).__init__(*args, **kwargs) self.app = self.make_app() self.storage = self.app.app.registry.storage self.cache = self.app.app.registry.cache self.permission = self.app.app.registry.permission self.storage.initialize_schema() self.permission.initialize_schema() self.cache.initialize_schema() self.headers = { 'Content-Type': 'application/json' } def make_app(self, settings=None, config=None): """Instantiate the application and setup requests to use the api prefix. :param dict settings: extra settings values :param pyramid.config.Configurator config: already initialized config :returns: webtest application instance """ settings = self.get_app_settings(extras=settings) try: main = self.entry_point.__func__ except AttributeError: # pragma: no cover main = self.entry_point.im_func wsgi_app = main({}, config=config, **settings) app = webtest.TestApp(wsgi_app) app.RequestClass = get_request_class(self.api_prefix) return app def get_app_settings(self, extras=None): """Application settings to be used. Override to tweak default settings for the tests. :param dict extras: extra settings values :rtype: dict """ settings = DEFAULT_SETTINGS.copy() settings['storage_backend'] = 'kinto.core.storage.memory' settings['cache_backend'] = 'kinto.core.cache.memory' settings['permission_backend'] = 'kinto.core.permission.memory' if extras is not None: settings.update(extras) return settings def tearDown(self): super(BaseWebTest, self).tearDown() self.storage.flush() self.cache.flush() self.permission.flush() class ThreadMixin(object): def setUp(self): super(ThreadMixin, self).setUp() self._threads = [] def tearDown(self): super(ThreadMixin, self).tearDown() for thread in self._threads: thread.join() def _create_thread(self, *args, **kwargs): thread = threading.Thread(*args, **kwargs) self._threads.append(thread) return thread
1
10,249
Any idea why you want to change the behavior here? Is there a security risk in not having the prefix in the principal?
Kinto-kinto
py
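The security question can be made concrete with a hypothetical permission check (illustrative Python; the ACL value is invented): permissions granted to the prefixed principal stop matching, and a bare user id could collide across authentication backends.

# Hypothetical ACL that grants access to the prefixed principal.
record_acl = {'basicauth:bob'}

old_principals = {'basicauth:bob', 'system.Everyone', 'system.Authenticated'}
new_principals = {'system.Everyone', 'system.Authenticated', 'bob'}

print(bool(record_acl & old_principals))  # True: prefixed principal matches
print(bool(record_acl & new_principals))  # False: only the bare 'bob' is present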
@@ -47,6 +47,7 @@ type ConfigMock struct {
 	mockMdcache    *MockMDCache
 	mockKcache     *MockKeyCache
 	mockBcache     *MockBlockCache
+	mockDBcache    *MockDirtyBlockCache
 	mockCrypto     *MockCrypto
 	mockCodec      *MockCodec
 	mockMdops      *MockMDOps
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "time" "github.com/golang/mock/gomock" "github.com/keybase/client/go/logger" "golang.org/x/net/context" ) type FakeObserver struct { localChange Node batchChanges []NodeChange ctx context.Context } func (fn *FakeObserver) LocalChange(ctx context.Context, node Node, write WriteRange) { fn.localChange = node fn.ctx = ctx } func (fn *FakeObserver) BatchChanges( ctx context.Context, nodeChanges []NodeChange) { fn.batchChanges = nodeChanges fn.ctx = ctx } func (fn *FakeObserver) TlfHandleChange(ctx context.Context, newHandle *TlfHandle) { return } type ConfigMock struct { ConfigLocal // local references to the proper mock type mockKbfs *MockKBFSOps mockKbpki *MockKBPKI mockKbd *MockKeybaseDaemon mockKeyman *MockKeyManager mockRep *MockReporter mockMdcache *MockMDCache mockKcache *MockKeyCache mockBcache *MockBlockCache mockCrypto *MockCrypto mockCodec *MockCodec mockMdops *MockMDOps mockKops *MockKeyOps mockBops *MockBlockOps mockMdserv *MockMDServer mockKserv *MockKeyServer mockBserv *MockBlockServer mockBsplit *MockBlockSplitter mockNotifier *MockNotifier mockClock *MockClock mockRekeyQueue *MockRekeyQueue observer *FakeObserver ctr *SafeTestReporter } func NewConfigMock(c *gomock.Controller, ctr *SafeTestReporter) *ConfigMock { config := &ConfigMock{} config.mockKbfs = NewMockKBFSOps(c) config.SetKBFSOps(config.mockKbfs) config.mockKbd = NewMockKeybaseDaemon(c) config.SetKeybaseDaemon(config.mockKbd) config.mockKbpki = NewMockKBPKI(c) config.SetKBPKI(config.mockKbpki) config.mockKeyman = NewMockKeyManager(c) config.SetKeyManager(config.mockKeyman) config.mockRep = NewMockReporter(c) config.SetReporter(config.mockRep) config.mockMdcache = NewMockMDCache(c) config.SetMDCache(config.mockMdcache) config.mockKcache = NewMockKeyCache(c) config.SetKeyCache(config.mockKcache) config.mockBcache = NewMockBlockCache(c) config.SetBlockCache(config.mockBcache) config.mockCrypto = NewMockCrypto(c) config.SetCrypto(config.mockCrypto) config.mockCodec = NewMockCodec(c) config.mockCodec.EXPECT().RegisterType(gomock.Any(), gomock.Any()). 
AnyTimes().Return() config.mockCodec.EXPECT().RegisterIfaceSliceType(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return() config.SetCodec(config.mockCodec) config.mockMdops = NewMockMDOps(c) config.SetMDOps(config.mockMdops) config.mockKops = NewMockKeyOps(c) config.SetKeyOps(config.mockKops) config.mockBops = NewMockBlockOps(c) config.SetBlockOps(config.mockBops) config.mockMdserv = NewMockMDServer(c) config.SetMDServer(config.mockMdserv) config.mockKserv = NewMockKeyServer(c) config.SetKeyServer(config.mockKserv) config.mockBserv = NewMockBlockServer(c) config.SetBlockServer(config.mockBserv) config.mockBsplit = NewMockBlockSplitter(c) config.SetBlockSplitter(config.mockBsplit) config.mockNotifier = NewMockNotifier(c) config.SetNotifier(config.mockNotifier) config.mockClock = NewMockClock(c) config.SetClock(config.mockClock) config.mockRekeyQueue = NewMockRekeyQueue(c) config.SetRekeyQueue(config.mockRekeyQueue) config.observer = &FakeObserver{} config.ctr = ctr config.SetLoggerMaker(func(m string) logger.Logger { return logger.NewTestLogger(ctr.t) }) // turn off background flushing by default during tests config.noBGFlush = true config.maxFileBytes = maxFileBytesDefault config.maxNameBytes = maxNameBytesDefault config.maxDirBytes = maxDirBytesDefault config.rwpWaitTime = rekeyWithPromptWaitTimeDefault config.qrPeriod = 0 * time.Second // no auto reclamation config.qrUnrefAge = qrUnrefAgeDefault return config } // CheckStateOnShutdown implements the Config interface for ConfigLocal. func (c *ConfigMock) CheckStateOnShutdown() bool { return false }
1
11,333
This reads like "mock database cache"; maybe use a clearer name.
keybase-kbfs
go
@@ -26,6 +26,8 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/jetstack/cert-manager/pkg/webhook/server/util"
+
 	logf "github.com/jetstack/cert-manager/pkg/logs"
 
 	"github.com/go-logr/logr"
1
/* Copyright 2019 The Jetstack cert-manager contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package server import ( "context" "crypto/tls" "errors" "fmt" "io/ioutil" "net" "net/http" "time" logf "github.com/jetstack/cert-manager/pkg/logs" "github.com/go-logr/logr" admissionv1 "k8s.io/api/admission/v1" admissionv1beta1 "k8s.io/api/admission/v1beta1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" ciphers "k8s.io/component-base/cli/flag" crlog "sigs.k8s.io/controller-runtime/pkg/log" "github.com/jetstack/cert-manager/pkg/util/profiling" "github.com/jetstack/cert-manager/pkg/webhook/handlers" servertls "github.com/jetstack/cert-manager/pkg/webhook/server/tls" ) var ( // defaultScheme is used to encode and decode the AdmissionReview and // ConversionReview resources submitted to the webhook server. // It is not used for performing validation, mutation or conversion. defaultScheme = runtime.NewScheme() ) func init() { admissionv1beta1.AddToScheme(defaultScheme) admissionv1.AddToScheme(defaultScheme) apiextensionsv1beta1.AddToScheme(defaultScheme) apiextensionsv1.AddToScheme(defaultScheme) // we need to add the options to empty v1 // TODO fix the server code to avoid this metav1.AddToGroupVersion(defaultScheme, schema.GroupVersion{Version: "v1"}) // TODO: keep the generic API server from wanting this unversioned := schema.GroupVersion{Group: "", Version: "v1"} defaultScheme.AddUnversionedTypes(unversioned, &metav1.Status{}, &metav1.APIVersions{}, &metav1.APIGroupList{}, &metav1.APIGroup{}, &metav1.APIResourceList{}, ) } type Server struct { // ListenAddr is the address the HTTP server should listen on // This must be specified. ListenAddr string // HealthzAddr is the address the healthz HTTP server should listen on // If not specified, the healthz endpoint will not be exposed. HealthzAddr string // EnablePprof controls whether net/http/pprof handlers are registered with // the HTTP listener. EnablePprof bool // Scheme is used to decode/encode request/response payloads. // If not specified, a default scheme that registers the AdmissionReview // and ConversionReview resource types will be used. // It is not used for performing validation, mutation or conversion. Scheme *runtime.Scheme // If specified, the server will listen with TLS using certificates // provided by this CertificateSource. CertificateSource servertls.CertificateSource ValidationWebhook handlers.ValidatingAdmissionHook MutationWebhook handlers.MutatingAdmissionHook ConversionWebhook handlers.ConversionHook // Log is an optional logger to write informational and error messages to. // If not specified, no messages will be logged. Log logr.Logger // CipherSuites is the list of allowed cipher suites for the server. 
// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). CipherSuites []string // MinTLSVersion is the minimum TLS version supported. // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). MinTLSVersion string listener net.Listener } func (s *Server) Run(stopCh <-chan struct{}) error { if s.Log == nil { s.Log = crlog.NullLogger{} } internalStopCh := make(chan struct{}) // only close the internalStopCh if it hasn't already been closed shutdown := false defer func() { if !shutdown { close(internalStopCh) } }() var healthzChan <-chan error var certSourceChan <-chan error // if a HealthzAddr is provided, start the healthz listener if s.HealthzAddr != "" { l, err := net.Listen("tcp", s.HealthzAddr) if err != nil { return err } mux := http.NewServeMux() mux.HandleFunc("/healthz", s.handleHealthz) mux.HandleFunc("/livez", s.handleLivez) s.Log.V(logf.InfoLevel).Info("listening for insecure healthz connections", "address", s.HealthzAddr) healthzChan = s.startServer(l, internalStopCh, mux) } // create a listener for actual webhook requests l, err := net.Listen("tcp", s.ListenAddr) if err != nil { return err } s.listener = l // wrap the listener with TLS if a CertificateSource is provided if s.CertificateSource != nil { s.Log.V(logf.InfoLevel).Info("listening for secure connections", "address", s.ListenAddr) certSourceChan = s.startCertificateSource(internalStopCh) cipherSuites, err := ciphers.TLSCipherSuites(s.CipherSuites) if err != nil { return err } minVersion, err := ciphers.TLSVersion(s.MinTLSVersion) if err != nil { return err } l = tls.NewListener(l, &tls.Config{ GetCertificate: s.CertificateSource.GetCertificate, CipherSuites: cipherSuites, MinVersion: minVersion, PreferServerCipherSuites: true, }) } else { s.Log.V(logf.InfoLevel).Info("listening for insecure connections", "address", s.ListenAddr) } mux := http.NewServeMux() mux.HandleFunc("/validate", s.handle(s.validate)) mux.HandleFunc("/mutate", s.handle(s.mutate)) mux.HandleFunc("/convert", s.handle(s.convert)) if s.EnablePprof { profiling.Install(mux) s.Log.V(logf.InfoLevel).Info("registered pprof handlers") } listenerChan := s.startServer(l, internalStopCh, mux) if certSourceChan == nil { certSourceChan = blockingChan(internalStopCh) } if healthzChan == nil { healthzChan = blockingChan(internalStopCh) } select { case err = <-healthzChan: case err = <-certSourceChan: case err = <-listenerChan: case <-stopCh: } close(internalStopCh) shutdown = true s.Log.V(logf.DebugLevel).Info("waiting for server to shutdown") waitForAll(healthzChan, certSourceChan, listenerChan) s.Log.V(logf.InfoLevel).Info("server shutdown successfully") return err } // Port returns the port number that the webhook listener is listening on func (s *Server) Port() (int, error) { if s.listener == nil { return 0, errors.New("Run() must be called before Port()") } tcpAddr, ok := s.listener.Addr().(*net.TCPAddr) if !ok { return 0, errors.New("unexpected listen address type (expected tcp)") } return tcpAddr.Port, nil } func (s *Server) startServer(l net.Listener, stopCh <-chan struct{}, handle http.Handler) <-chan error { ch := make(chan error) go func() { defer close(ch) srv := &http.Server{ Handler: handle, } select { case err := <-channelWrapper(func() error { return srv.Serve(l) }): ch <- err case <-stopCh: // allow a fixed 5s for graceful shutdown ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() if err := srv.Shutdown(ctx); err != nil { s.Log.Error(err, 
"failed to gracefully shutdown http server") ch <- err } s.Log.V(logf.DebugLevel).Info("shutdown HTTP server gracefully") } }() return ch } func (s *Server) startCertificateSource(stopCh <-chan struct{}) <-chan error { fn := func() error { return s.CertificateSource.Run(stopCh) } return channelWrapper(fn) } func waitForAll(chs ...<-chan error) error { for _, ch := range chs { if err := <-ch; err != nil { return fmt.Errorf("error waiting for goroutine to exit: %w", err) } } return nil } func channelWrapper(fn func() error) <-chan error { ch := make(chan error) go func() { defer close(ch) ch <- fn() }() return ch } // blockingChan returns a 'no-op' error channel. // When stopCh is closed, the error channel will also be closed. func blockingChan(stopCh <-chan struct{}) <-chan error { ch := make(chan error) go func() { defer close(ch) <-stopCh }() return ch } func (s *Server) scheme() *runtime.Scheme { if s.Scheme == nil { return defaultScheme } return s.Scheme } func (s *Server) validate(obj runtime.Object) (runtime.Object, error) { outputVersion := admissionv1.SchemeGroupVersion review, isV1 := obj.(*admissionv1.AdmissionReview) if !isV1 { outputVersion = admissionv1beta1.SchemeGroupVersion reviewv1beta1 := obj.(*admissionv1beta1.AdmissionReview) convertedReview, err := defaultScheme.ConvertToVersion(reviewv1beta1, admissionv1.SchemeGroupVersion) if err != nil { return nil, err } review = convertedReview.(*admissionv1.AdmissionReview) } resp := s.ValidationWebhook.Validate(review.Request) review.Response = resp versionedOutput, err := defaultScheme.ConvertToVersion(review, outputVersion) return versionedOutput, err } func (s *Server) mutate(obj runtime.Object) (runtime.Object, error) { outputVersion := admissionv1.SchemeGroupVersion review, isV1 := obj.(*admissionv1.AdmissionReview) if !isV1 { outputVersion = admissionv1beta1.SchemeGroupVersion reviewv1beta1 := obj.(*admissionv1beta1.AdmissionReview) convertedReview, err := defaultScheme.ConvertToVersion(reviewv1beta1, admissionv1.SchemeGroupVersion) if err != nil { return nil, err } review = convertedReview.(*admissionv1.AdmissionReview) } resp := s.MutationWebhook.Mutate(review.Request) review.Response = resp versionedOutput, err := defaultScheme.ConvertToVersion(review, outputVersion) return versionedOutput, err } func (s *Server) convert(obj runtime.Object) (runtime.Object, error) { outputVersion := apiextensionsv1.SchemeGroupVersion review, isV1 := obj.(*apiextensionsv1.ConversionReview) if !isV1 { outputVersion = apiextensionsv1beta1.SchemeGroupVersion reviewv1beta1, isV1beta1 := obj.(*admissionv1beta1.AdmissionReview) if !isV1beta1 { return nil, errors.New("request is not of type apiextensions v1 or v1beta1") } convertedReview, err := defaultScheme.ConvertToVersion(reviewv1beta1, admissionv1.SchemeGroupVersion) if err != nil { return nil, err } review = convertedReview.(*apiextensionsv1.ConversionReview) } resp := s.ConversionWebhook.Convert(review.Request) review.Response = resp versionedOutput, err := defaultScheme.ConvertToVersion(review, outputVersion) return versionedOutput, err } func (s *Server) handle(inner func(runtime.Object) (runtime.Object, error)) func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) { defer req.Body.Close() data, err := ioutil.ReadAll(req.Body) if err != nil { s.Log.Error(err, "failed to read request body") w.WriteHeader(http.StatusBadRequest) return } codec := json.NewSerializerWithOptions(json.DefaultMetaFactory, s.scheme(), s.scheme(), 
json.SerializerOptions{ Pretty: true, }) codec.Decode(data, nil, nil) obj, _, err := codec.Decode(data, nil, nil) if err != nil { s.Log.Error(err, "failed to decode request body") w.WriteHeader(http.StatusBadRequest) return } result, err := inner(obj) if err != nil { s.Log.Error(err, "failed to process webhook request") w.WriteHeader(http.StatusInternalServerError) return } if err := codec.Encode(result, w); err != nil { s.Log.Error(err, "failed to encode response body") w.WriteHeader(http.StatusInternalServerError) return } } } func (s *Server) handleHealthz(w http.ResponseWriter, req *http.Request) { defer req.Body.Close() if s.CertificateSource != nil && !s.CertificateSource.Healthy() { s.Log.V(logf.WarnLevel).Info("Health check failed as CertificateSource is unhealthy") w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleLivez(w http.ResponseWriter, req *http.Request) { defer req.Body.Close() w.WriteHeader(http.StatusOK) }
1
23,008
Nit: move this with the other CM imports.
jetstack-cert-manager
go
@@ -2,9 +2,10 @@
 
 namespace Shopsys\ShopBundle\Controller\Front;
 
+use League\Flysystem\FilesystemInterface;
 use Shopsys\FrameworkBundle\Component\Image\Config\ImageConfig;
 use Shopsys\FrameworkBundle\Component\Image\Processing\ImageGeneratorFacade;
-use Symfony\Component\HttpFoundation\BinaryFileResponse;
+use Symfony\Component\HttpFoundation\StreamedResponse;
 
 class ImageController extends FrontBaseController
 {
1
<?php namespace Shopsys\ShopBundle\Controller\Front; use Shopsys\FrameworkBundle\Component\Image\Config\ImageConfig; use Shopsys\FrameworkBundle\Component\Image\Processing\ImageGeneratorFacade; use Symfony\Component\HttpFoundation\BinaryFileResponse; class ImageController extends FrontBaseController { /** * @var \Shopsys\FrameworkBundle\Component\Image\Processing\ImageGeneratorFacade */ private $imageGeneratorFacade; public function __construct(ImageGeneratorFacade $imageGeneratorFacade) { $this->imageGeneratorFacade = $imageGeneratorFacade; } public function getImageAction($entityName, $type, $sizeName, $imageId) { if ($sizeName === ImageConfig::DEFAULT_SIZE_NAME) { $sizeName = null; } try { $imageFilepath = $this->imageGeneratorFacade->generateImageAndGetFilepath($entityName, $imageId, $type, $sizeName); } catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageException $e) { $message = 'Generate image for entity "' . $entityName . '" (type=' . $type . ', size=' . $sizeName . ', imageId=' . $imageId . ') failed.'; throw $this->createNotFoundException($message, $e); } try { return new BinaryFileResponse($imageFilepath); } catch (\Symfony\Component\HttpFoundation\File\Exception\FileException $e) { $message = 'Response with file "' . $imageFilepath . '" failed.'; throw $this->createNotFoundException($message, $e); } } }
1
10,795
This change should be mentioned in the CM.
shopsys-shopsys
php
@@ -23,6 +23,13 @@ namespace Datadog.Trace
             HttpHeaderNames.DatadogTags,
         };
 
+        /// <summary>
+        /// An <see cref="ISpanContext"/> with default values. Can be used as the value for
+        /// <see cref="SpanCreationSettings.Parent"/> in <see cref="Tracer.StartActive(string, SpanCreationSettings)"/>
+        /// to specify that the new span should not inherit the currently active scope as its parent.
+        /// </summary>
+        public static readonly ISpanContext Empty = new ReadOnlySpanContext(traceId: 0, spanId: 0, serviceName: null);
+
         /// <summary>
         /// Initializes a new instance of the <see cref="SpanContext"/> class
         /// from a propagated context. <see cref="Parent"/> will be null
1
// <copyright file="SpanContext.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System.Collections; using System.Collections.Generic; using Datadog.Trace.Util; namespace Datadog.Trace { /// <summary> /// The SpanContext contains all the information needed to express relationships between spans inside or outside the process boundaries. /// </summary> public class SpanContext : ISpanContext, IReadOnlyDictionary<string, string> { private static readonly string[] KeyNames = { HttpHeaderNames.TraceId, HttpHeaderNames.ParentId, HttpHeaderNames.SamplingPriority, HttpHeaderNames.Origin, HttpHeaderNames.DatadogTags, }; /// <summary> /// Initializes a new instance of the <see cref="SpanContext"/> class /// from a propagated context. <see cref="Parent"/> will be null /// since this is a root context locally. /// </summary> /// <param name="traceId">The propagated trace id.</param> /// <param name="spanId">The propagated span id.</param> /// <param name="samplingPriority">The propagated sampling priority.</param> /// <param name="serviceName">The service name to propagate to child spans.</param> public SpanContext(ulong? traceId, ulong spanId, SamplingPriority? samplingPriority = null, string serviceName = null) : this(traceId, serviceName) { SpanId = spanId; SamplingPriority = samplingPriority; } /// <summary> /// Initializes a new instance of the <see cref="SpanContext"/> class /// from a propagated context. <see cref="Parent"/> will be null /// since this is a root context locally. /// </summary> /// <param name="traceId">The propagated trace id.</param> /// <param name="spanId">The propagated span id.</param> /// <param name="samplingPriority">The propagated sampling priority.</param> /// <param name="serviceName">The service name to propagate to child spans.</param> /// <param name="origin">The propagated origin of the trace.</param> internal SpanContext(ulong? traceId, ulong spanId, SamplingPriority? samplingPriority, string serviceName, string origin) : this(traceId, serviceName) { SpanId = spanId; SamplingPriority = samplingPriority; Origin = origin; } /// <summary> /// Initializes a new instance of the <see cref="SpanContext"/> class /// that is the child of the specified parent context. /// </summary> /// <param name="parent">The parent context.</param> /// <param name="traceContext">The trace context.</param> /// <param name="serviceName">The service name to propagate to child spans.</param> /// <param name="traceId">Override the trace id if there's no parent.</param> /// <param name="spanId">The propagated span id.</param> internal SpanContext(ISpanContext parent, TraceContext traceContext, string serviceName, ulong? traceId = null, ulong? spanId = null) : this(parent?.TraceId ?? traceId, serviceName) { SpanId = spanId ?? SpanIdGenerator.ThreadInstance.CreateNew(); Parent = parent; TraceContext = traceContext; if (parent is SpanContext spanContext) { Origin = spanContext.Origin; } } private SpanContext(ulong? traceId, string serviceName) { TraceId = traceId > 0 ? traceId.Value : SpanIdGenerator.ThreadInstance.CreateNew(); ServiceName = serviceName; } /// <summary> /// Gets the parent context. 
/// </summary> public ISpanContext Parent { get; } /// <summary> /// Gets the trace id /// </summary> public ulong TraceId { get; } /// <summary> /// Gets the span id of the parent span /// </summary> public ulong? ParentId => Parent?.SpanId; /// <summary> /// Gets the span id /// </summary> public ulong SpanId { get; } /// <summary> /// Gets or sets the service name to propagate to child spans. /// </summary> public string ServiceName { get; set; } /// <summary> /// Gets or sets the origin of the trace /// </summary> internal string Origin { get; set; } /// <summary> /// Gets or sets a collection of propagated internal Datadog tags, /// formatted as "key1=value1,key2=value2". /// </summary> /// <remarks> /// We're keeping this as the string representation to avoid having to parse. /// For now, it's relatively easy to append new values when needed. /// </remarks> internal string DatadogTags { get; set; } /// <summary> /// Gets the trace context. /// Returns null for contexts created from incoming propagated context. /// </summary> internal TraceContext TraceContext { get; } /// <summary> /// Gets the sampling priority for contexts created from incoming propagated context. /// Returns null for local contexts. /// </summary> internal SamplingPriority? SamplingPriority { get; } /// <inheritdoc/> int IReadOnlyCollection<KeyValuePair<string, string>>.Count => KeyNames.Length; /// <inheritdoc /> IEnumerable<string> IReadOnlyDictionary<string, string>.Keys => KeyNames; /// <inheritdoc/> IEnumerable<string> IReadOnlyDictionary<string, string>.Values { get { foreach (var key in KeyNames) { yield return ((IReadOnlyDictionary<string, string>)this)[key]; } } } /// <inheritdoc/> string IReadOnlyDictionary<string, string>.this[string key] { get { if (((IReadOnlyDictionary<string, string>)this).TryGetValue(key, out var value)) { return value; } ThrowHelper.ThrowKeyNotFoundException($"Key not found: {key}"); return default; } } /// <inheritdoc/> IEnumerator<KeyValuePair<string, string>> IEnumerable<KeyValuePair<string, string>>.GetEnumerator() { var dictionary = (IReadOnlyDictionary<string, string>)this; foreach (var key in KeyNames) { yield return new KeyValuePair<string, string>(key, dictionary[key]); } } /// <inheritdoc/> IEnumerator IEnumerable.GetEnumerator() { return ((IReadOnlyDictionary<string, string>)this).GetEnumerator(); } /// <inheritdoc/> bool IReadOnlyDictionary<string, string>.ContainsKey(string key) { foreach (var k in KeyNames) { if (k == key) { return true; } } return false; } /// <inheritdoc/> bool IReadOnlyDictionary<string, string>.TryGetValue(string key, out string value) { switch (key) { case HttpHeaderNames.TraceId: value = TraceId.ToString(); return true; case HttpHeaderNames.ParentId: value = SpanId.ToString(); return true; case HttpHeaderNames.SamplingPriority: var samplingPriority = SamplingPriority; value = samplingPriority != null ? ((int)samplingPriority.Value).ToString() : null; return true; case HttpHeaderNames.Origin: value = Origin; return true; case HttpHeaderNames.DatadogTags: value = DatadogTags; return true; default: value = null; return false; } } } }
1
26,177
Do we really want a new type, or can we just use `SpanContext`? The `ISpanContext` interface is read-only, but there's nothing stopping users from casting this to `SpanContext` and modifying it.
DataDog-dd-trace-dotnet
.cs
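The trade-off behind the reviewer's question is general: a shared sentinel like `Empty` must be immutable, otherwise one caller's cast-and-mutate leaks into every other caller. A language-neutral sketch of the hazard (Python, purely illustrative; the class and names are invented):

class MutableContext:
    """Stand-in for a mutable SpanContext-like type."""
    def __init__(self, service_name=None):
        self.service_name = service_name

# If the shared sentinel were an instance of the mutable type...
EMPTY = MutableContext()

# ...nothing stops a caller from modifying it in place:
EMPTY.service_name = "oops"

# Every later user of the sentinel now observes the mutation, which is
# why a dedicated read-only type avoids this failure mode.
assert EMPTY.service_name == "oops"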
@@ -13,7 +13,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 // </copyright>
-#if NET461
+#if NET452 || NET461
 using System;
 using System.Collections;
 using System.Collections.Generic;
1
// <copyright file="HttpWebRequestActivitySource.net461.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> #if NET461 using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Net; using System.Reflection; using System.Reflection.Emit; using System.Runtime.CompilerServices; using System.Text; using OpenTelemetry.Trace; namespace OpenTelemetry.Instrumentation.Dependencies.Implementation { /// <summary> /// Hooks into the <see cref="HttpWebRequest"/> class reflectively and writes diagnostic events as requests are processed. /// </summary> /// <remarks> /// Inspired from the System.Diagnostics.DiagnosticSource.HttpHandlerDiagnosticListener class which has some bugs and feature gaps. /// See https://github.com/dotnet/runtime/pull/33732 for details. /// </remarks> internal sealed class HttpWebRequestActivitySource { internal const string ActivitySourceName = "HttpWebRequest"; internal const string ActivityName = ActivitySourceName + ".HttpRequestOut"; internal static readonly HttpWebRequestActivitySource Instance = new HttpWebRequestActivitySource(); private const string CorrelationContextHeaderName = "Correlation-Context"; private const string TraceParentHeaderName = "traceparent"; private const string TraceStateHeaderName = "tracestate"; private static readonly Version Version = typeof(HttpWebRequestActivitySource).Assembly.GetName().Version; private static readonly ActivitySource WebRequestActivitySource = new ActivitySource(ActivitySourceName, Version.ToString()); // Fields for reflection private static FieldInfo connectionGroupListField; private static Type connectionGroupType; private static FieldInfo connectionListField; private static Type connectionType; private static FieldInfo writeListField; private static Func<object, IAsyncResult> writeAResultAccessor; private static Func<object, IAsyncResult> readAResultAccessor; // LazyAsyncResult & ContextAwareResult private static Func<object, AsyncCallback> asyncCallbackAccessor; private static Action<object, AsyncCallback> asyncCallbackModifier; private static Func<object, object> asyncStateAccessor; private static Action<object, object> asyncStateModifier; private static Func<object, bool> endCalledAccessor; private static Func<object, object> resultAccessor; private static Func<object, bool> isContextAwareResultChecker; // HttpWebResponse private static Func<object[], HttpWebResponse> httpWebResponseCtor; private static Func<HttpWebResponse, Uri> uriAccessor; private static Func<HttpWebResponse, object> verbAccessor; private static Func<HttpWebResponse, string> mediaTypeAccessor; private static Func<HttpWebResponse, bool> usesProxySemanticsAccessor; private static Func<HttpWebResponse, object> coreResponseDataAccessor; private static Func<HttpWebResponse, bool> isWebSocketResponseAccessor; private static Func<HttpWebResponse, string> connectionGroupNameAccessor; internal HttpWebRequestActivitySource() 
{ try { PrepareReflectionObjects(); PerformInjection(); } catch (Exception ex) { // If anything went wrong, just no-op. Write an event so at least we can find out. InstrumentationEventSource.Log.ExceptionInitializingInstrumentation(typeof(HttpWebRequestActivitySource).FullName, ex); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddRequestTagsAndInstrumentRequest(HttpWebRequest request, Activity activity) { activity.DisplayName = HttpTagHelper.GetOperationNameForHttpMethod(request.Method); InstrumentRequest(request, activity); activity.SetCustomProperty("HttpWebRequest.Request", request); if (activity.IsAllDataRequested) { activity.AddTag(SpanAttributeConstants.ComponentKey, "http"); activity.AddTag(SpanAttributeConstants.HttpMethodKey, request.Method); activity.AddTag(SpanAttributeConstants.HttpHostKey, HttpTagHelper.GetHostTagValueFromRequestUri(request.RequestUri)); activity.AddTag(SpanAttributeConstants.HttpUrlKey, request.RequestUri.OriginalString); activity.AddTag(SpanAttributeConstants.HttpFlavorKey, HttpTagHelper.GetFlavorTagValueFromProtocolVersion(request.ProtocolVersion)); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddResponseTags(HttpWebResponse response, Activity activity) { activity.SetCustomProperty("HttpWebRequest.Response", response); if (activity.IsAllDataRequested) { activity.AddTag(SpanAttributeConstants.HttpStatusCodeKey, HttpTagHelper.GetStatusCodeTagValueFromHttpStatusCode(response.StatusCode)); Status status = SpanHelper.ResolveSpanStatusForHttpStatusCode((int)response.StatusCode); activity.AddTag(SpanAttributeConstants.StatusCodeKey, SpanHelper.GetCachedCanonicalCodeString(status.CanonicalCode)); activity.AddTag(SpanAttributeConstants.StatusDescriptionKey, response.StatusDescription); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddExceptionTags(Exception exception, Activity activity) { activity.SetCustomProperty("HttpWebRequest.Exception", exception); if (!activity.IsAllDataRequested) { return; } Status status; if (exception is WebException wexc) { if (wexc.Response is HttpWebResponse response) { activity.AddTag(SpanAttributeConstants.HttpStatusCodeKey, HttpTagHelper.GetStatusCodeTagValueFromHttpStatusCode(response.StatusCode)); status = SpanHelper.ResolveSpanStatusForHttpStatusCode((int)response.StatusCode).WithDescription(response.StatusDescription); } else { switch (wexc.Status) { case WebExceptionStatus.Timeout: status = Status.DeadlineExceeded; break; case WebExceptionStatus.NameResolutionFailure: status = Status.InvalidArgument.WithDescription(exception.Message); break; case WebExceptionStatus.SendFailure: case WebExceptionStatus.ConnectFailure: case WebExceptionStatus.SecureChannelFailure: case WebExceptionStatus.TrustFailure: status = Status.FailedPrecondition.WithDescription(exception.Message); break; case WebExceptionStatus.ServerProtocolViolation: status = Status.Unimplemented.WithDescription(exception.Message); break; case WebExceptionStatus.RequestCanceled: status = Status.Cancelled; break; case WebExceptionStatus.MessageLengthLimitExceeded: status = Status.ResourceExhausted.WithDescription(exception.Message); break; default: status = Status.Unknown.WithDescription(exception.Message); break; } } } else { status = Status.Unknown.WithDescription(exception.Message); } activity.AddTag(SpanAttributeConstants.StatusCodeKey, SpanHelper.GetCachedCanonicalCodeString(status.CanonicalCode)); activity.AddTag(SpanAttributeConstants.StatusDescriptionKey, status.Description); } 
[MethodImpl(MethodImplOptions.AggressiveInlining)] private static void InstrumentRequest(HttpWebRequest request, Activity activity) { // do not inject header if it was injected already // perhaps tracing systems wants to override it if (request.Headers.Get(TraceParentHeaderName) == null) { request.Headers.Add(TraceParentHeaderName, activity.Id); string traceState = activity.TraceStateString; if (traceState != null) { request.Headers.Add(TraceStateHeaderName, traceState); } } if (request.Headers.Get(CorrelationContextHeaderName) == null) { // we expect baggage to be empty or contain a few items using IEnumerator<KeyValuePair<string, string>> e = activity.Baggage.GetEnumerator(); if (e.MoveNext()) { StringBuilder baggage = new StringBuilder(); do { KeyValuePair<string, string> item = e.Current; baggage.Append(WebUtility.UrlEncode(item.Key)).Append('=').Append(WebUtility.UrlEncode(item.Value)).Append(','); } while (e.MoveNext()); baggage.Remove(baggage.Length - 1, 1); request.Headers.Add(CorrelationContextHeaderName, baggage.ToString()); } } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static bool IsRequestInstrumented(HttpWebRequest request) => request.Headers.Get(TraceParentHeaderName) != null; private static void ProcessRequest(HttpWebRequest request) { if (!WebRequestActivitySource.HasListeners() || IsRequestInstrumented(request)) { // No subscribers to the ActivitySource or this request was instrumented by previous // ProcessRequest, such is the case with redirect responses where the same request is sent again. return; } var activity = WebRequestActivitySource.StartActivity(ActivityName, ActivityKind.Client); if (activity == null) { // There is a listener but it decided not to sample the current request. return; } IAsyncResult asyncContext = writeAResultAccessor(request); if (asyncContext != null) { // Flow here is for [Begin]GetRequestStream[Async]. AsyncCallbackWrapper callback = new AsyncCallbackWrapper(request, activity, asyncCallbackAccessor(asyncContext)); asyncCallbackModifier(asyncContext, callback.AsyncCallback); } else { // Flow here is for [Begin]GetResponse[Async] without a prior call to [Begin]GetRequestStream[Async]. asyncContext = readAResultAccessor(request); AsyncCallbackWrapper callback = new AsyncCallbackWrapper(request, activity, asyncCallbackAccessor(asyncContext)); asyncCallbackModifier(asyncContext, callback.AsyncCallback); } AddRequestTagsAndInstrumentRequest(request, activity); } private static void HookOrProcessResult(HttpWebRequest request) { IAsyncResult writeAsyncContext = writeAResultAccessor(request); if (writeAsyncContext == null || !(asyncCallbackAccessor(writeAsyncContext)?.Target is AsyncCallbackWrapper writeAsyncContextCallback)) { // If we already hooked into the read result during ProcessRequest or we hooked up after the fact already we don't need to do anything here. return; } // If we got here it means the user called [Begin]GetRequestStream[Async] and we have to hook the read result after the fact. IAsyncResult readAsyncContext = readAResultAccessor(request); if (readAsyncContext == null) { // We're still trying to establish the connection (no read has started). return; } // Clear our saved callback so we know not to process again. asyncCallbackModifier(writeAsyncContext, null); if (endCalledAccessor.Invoke(readAsyncContext) || readAsyncContext.CompletedSynchronously) { // We need to process the result directly because the read callback has already fired. Force a copy because response has likely already been disposed. 
ProcessResult(readAsyncContext, null, writeAsyncContextCallback.Activity, resultAccessor(readAsyncContext), true); return; } // Hook into the result callback if it hasn't already fired. AsyncCallbackWrapper callback = new AsyncCallbackWrapper(writeAsyncContextCallback.Request, writeAsyncContextCallback.Activity, asyncCallbackAccessor(readAsyncContext)); asyncCallbackModifier(readAsyncContext, callback.AsyncCallback); } private static void ProcessResult(IAsyncResult asyncResult, AsyncCallback asyncCallback, Activity activity, object result, bool forceResponseCopy) { // We could be executing on a different thread now so set the activity. Debug.Assert(Activity.Current == null || Activity.Current == activity, "There was an unexpected active Activity on the result thread."); if (Activity.Current == null) { Activity.Current = activity; } try { if (result is Exception ex) { AddExceptionTags(ex, activity); } else { HttpWebResponse response = (HttpWebResponse)result; if (forceResponseCopy || (asyncCallback == null && isContextAwareResultChecker(asyncResult))) { // For async calls (where asyncResult is ContextAwareResult)... // If no callback was set assume the user is manually calling BeginGetResponse & EndGetResponse // in which case they could dispose the HttpWebResponse before our listeners have a chance to work with it. // Disposed HttpWebResponse throws when accessing properties, so let's make a copy of the data to ensure that doesn't happen. HttpWebResponse responseCopy = httpWebResponseCtor( new object[] { uriAccessor(response), verbAccessor(response), coreResponseDataAccessor(response), mediaTypeAccessor(response), usesProxySemanticsAccessor(response), DecompressionMethods.None, isWebSocketResponseAccessor(response), connectionGroupNameAccessor(response), }); AddResponseTags(responseCopy, activity); } else { AddResponseTags(response, activity); } } } catch { } activity.Stop(); } private static void PrepareReflectionObjects() { // At any point, if the operation failed, it should just throw. The caller should catch all exceptions and swallow. Type servicePointType = typeof(ServicePoint); Assembly systemNetHttpAssembly = servicePointType.Assembly; connectionGroupListField = servicePointType.GetField("m_ConnectionGroupList", BindingFlags.Instance | BindingFlags.NonPublic); connectionGroupType = systemNetHttpAssembly?.GetType("System.Net.ConnectionGroup"); connectionListField = connectionGroupType?.GetField("m_ConnectionList", BindingFlags.Instance | BindingFlags.NonPublic); connectionType = systemNetHttpAssembly?.GetType("System.Net.Connection"); writeListField = connectionType?.GetField("m_WriteList", BindingFlags.Instance | BindingFlags.NonPublic); writeAResultAccessor = CreateFieldGetter<IAsyncResult>(typeof(HttpWebRequest), "_WriteAResult", BindingFlags.NonPublic | BindingFlags.Instance); readAResultAccessor = CreateFieldGetter<IAsyncResult>(typeof(HttpWebRequest), "_ReadAResult", BindingFlags.NonPublic | BindingFlags.Instance); // Double checking to make sure we have all the pieces initialized if (connectionGroupListField == null || connectionGroupType == null || connectionListField == null || connectionType == null || writeListField == null || writeAResultAccessor == null || readAResultAccessor == null || !PrepareAsyncResultReflectionObjects(systemNetHttpAssembly) || !PrepareHttpWebResponseReflectionObjects(systemNetHttpAssembly)) { // If anything went wrong here, just return false. There is nothing we can do. 
throw new InvalidOperationException("Unable to initialize all required reflection objects"); } } private static bool PrepareAsyncResultReflectionObjects(Assembly systemNetHttpAssembly) { Type lazyAsyncResultType = systemNetHttpAssembly?.GetType("System.Net.LazyAsyncResult"); if (lazyAsyncResultType != null) { asyncCallbackAccessor = CreateFieldGetter<AsyncCallback>(lazyAsyncResultType, "m_AsyncCallback", BindingFlags.NonPublic | BindingFlags.Instance); asyncCallbackModifier = CreateFieldSetter<AsyncCallback>(lazyAsyncResultType, "m_AsyncCallback", BindingFlags.NonPublic | BindingFlags.Instance); asyncStateAccessor = CreateFieldGetter<object>(lazyAsyncResultType, "m_AsyncState", BindingFlags.NonPublic | BindingFlags.Instance); asyncStateModifier = CreateFieldSetter<object>(lazyAsyncResultType, "m_AsyncState", BindingFlags.NonPublic | BindingFlags.Instance); endCalledAccessor = CreateFieldGetter<bool>(lazyAsyncResultType, "m_EndCalled", BindingFlags.NonPublic | BindingFlags.Instance); resultAccessor = CreateFieldGetter<object>(lazyAsyncResultType, "m_Result", BindingFlags.NonPublic | BindingFlags.Instance); } Type contextAwareResultType = systemNetHttpAssembly?.GetType("System.Net.ContextAwareResult"); if (contextAwareResultType != null) { isContextAwareResultChecker = CreateTypeChecker(contextAwareResultType); } return asyncCallbackAccessor != null && asyncCallbackModifier != null && asyncStateAccessor != null && asyncStateModifier != null && endCalledAccessor != null && resultAccessor != null && isContextAwareResultChecker != null; } private static bool PrepareHttpWebResponseReflectionObjects(Assembly systemNetHttpAssembly) { Type knownHttpVerbType = systemNetHttpAssembly?.GetType("System.Net.KnownHttpVerb"); Type coreResponseData = systemNetHttpAssembly?.GetType("System.Net.CoreResponseData"); if (knownHttpVerbType != null && coreResponseData != null) { var constructorParameterTypes = new Type[] { typeof(Uri), knownHttpVerbType, coreResponseData, typeof(string), typeof(bool), typeof(DecompressionMethods), typeof(bool), typeof(string), }; ConstructorInfo ctor = typeof(HttpWebResponse).GetConstructor( BindingFlags.NonPublic | BindingFlags.Instance, null, constructorParameterTypes, null); if (ctor != null) { httpWebResponseCtor = CreateTypeInstance<HttpWebResponse>(ctor); } } uriAccessor = CreateFieldGetter<HttpWebResponse, Uri>("m_Uri", BindingFlags.NonPublic | BindingFlags.Instance); verbAccessor = CreateFieldGetter<HttpWebResponse, object>("m_Verb", BindingFlags.NonPublic | BindingFlags.Instance); mediaTypeAccessor = CreateFieldGetter<HttpWebResponse, string>("m_MediaType", BindingFlags.NonPublic | BindingFlags.Instance); usesProxySemanticsAccessor = CreateFieldGetter<HttpWebResponse, bool>("m_UsesProxySemantics", BindingFlags.NonPublic | BindingFlags.Instance); coreResponseDataAccessor = CreateFieldGetter<HttpWebResponse, object>("m_CoreResponseData", BindingFlags.NonPublic | BindingFlags.Instance); isWebSocketResponseAccessor = CreateFieldGetter<HttpWebResponse, bool>("m_IsWebSocketResponse", BindingFlags.NonPublic | BindingFlags.Instance); connectionGroupNameAccessor = CreateFieldGetter<HttpWebResponse, string>("m_ConnectionGroupName", BindingFlags.NonPublic | BindingFlags.Instance); return httpWebResponseCtor != null && uriAccessor != null && verbAccessor != null && mediaTypeAccessor != null && usesProxySemanticsAccessor != null && coreResponseDataAccessor != null && isWebSocketResponseAccessor != null && connectionGroupNameAccessor != null; } private static void PerformInjection() { 
FieldInfo servicePointTableField = typeof(ServicePointManager).GetField("s_ServicePointTable", BindingFlags.Static | BindingFlags.NonPublic); if (servicePointTableField == null) { // If anything went wrong here, just throw. There is nothing we can do. throw new InvalidOperationException("Unable to access the ServicePointTable field"); } Hashtable originalTable = servicePointTableField.GetValue(null) as Hashtable; ServicePointHashtable newTable = new ServicePointHashtable(originalTable ?? new Hashtable()); servicePointTableField.SetValue(null, newTable); } private static Func<TClass, TField> CreateFieldGetter<TClass, TField>(string fieldName, BindingFlags flags) where TClass : class { FieldInfo field = typeof(TClass).GetField(fieldName, flags); if (field != null) { string methodName = field.ReflectedType.FullName + ".get_" + field.Name; DynamicMethod getterMethod = new DynamicMethod(methodName, typeof(TField), new[] { typeof(TClass) }, true); ILGenerator generator = getterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Ldfld, field); generator.Emit(OpCodes.Ret); return (Func<TClass, TField>)getterMethod.CreateDelegate(typeof(Func<TClass, TField>)); } return null; } /// <summary> /// Creates a getter for a field defined in a private or internal type /// represented by the classType variable. /// </summary> private static Func<object, TField> CreateFieldGetter<TField>(Type classType, string fieldName, BindingFlags flags) { FieldInfo field = classType.GetField(fieldName, flags); if (field != null) { string methodName = classType.FullName + ".get_" + field.Name; DynamicMethod getterMethod = new DynamicMethod(methodName, typeof(TField), new[] { typeof(object) }, true); ILGenerator generator = getterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Castclass, classType); generator.Emit(OpCodes.Ldfld, field); generator.Emit(OpCodes.Ret); return (Func<object, TField>)getterMethod.CreateDelegate(typeof(Func<object, TField>)); } return null; } /// <summary> /// Creates a setter for a field defined in a private or internal type /// represented by the classType variable. /// </summary> private static Action<object, TField> CreateFieldSetter<TField>(Type classType, string fieldName, BindingFlags flags) { FieldInfo field = classType.GetField(fieldName, flags); if (field != null) { string methodName = classType.FullName + ".set_" + field.Name; DynamicMethod setterMethod = new DynamicMethod(methodName, null, new[] { typeof(object), typeof(TField) }, true); ILGenerator generator = setterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Castclass, classType); generator.Emit(OpCodes.Ldarg_1); generator.Emit(OpCodes.Stfld, field); generator.Emit(OpCodes.Ret); return (Action<object, TField>)setterMethod.CreateDelegate(typeof(Action<object, TField>)); } return null; } /// <summary> /// Creates an "is" method for the private or internal type.
/// </summary> private static Func<object, bool> CreateTypeChecker(Type classType) { string methodName = classType.FullName + ".typeCheck"; DynamicMethod setterMethod = new DynamicMethod(methodName, typeof(bool), new[] { typeof(object) }, true); ILGenerator generator = setterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Isinst, classType); generator.Emit(OpCodes.Ldnull); generator.Emit(OpCodes.Cgt_Un); generator.Emit(OpCodes.Ret); return (Func<object, bool>)setterMethod.CreateDelegate(typeof(Func<object, bool>)); } /// <summary> /// Creates an instance of T using a private or internal ctor. /// </summary> private static Func<object[], T> CreateTypeInstance<T>(ConstructorInfo constructorInfo) { Type classType = typeof(T); string methodName = classType.FullName + ".ctor"; DynamicMethod setterMethod = new DynamicMethod(methodName, classType, new Type[] { typeof(object[]) }, true); ILGenerator generator = setterMethod.GetILGenerator(); ParameterInfo[] ctorParams = constructorInfo.GetParameters(); for (int i = 0; i < ctorParams.Length; i++) { generator.Emit(OpCodes.Ldarg_0); switch (i) { case 0: generator.Emit(OpCodes.Ldc_I4_0); break; case 1: generator.Emit(OpCodes.Ldc_I4_1); break; case 2: generator.Emit(OpCodes.Ldc_I4_2); break; case 3: generator.Emit(OpCodes.Ldc_I4_3); break; case 4: generator.Emit(OpCodes.Ldc_I4_4); break; case 5: generator.Emit(OpCodes.Ldc_I4_5); break; case 6: generator.Emit(OpCodes.Ldc_I4_6); break; case 7: generator.Emit(OpCodes.Ldc_I4_7); break; case 8: generator.Emit(OpCodes.Ldc_I4_8); break; default: generator.Emit(OpCodes.Ldc_I4, i); break; } generator.Emit(OpCodes.Ldelem_Ref); Type paramType = ctorParams[i].ParameterType; generator.Emit(paramType.IsValueType ? OpCodes.Unbox_Any : OpCodes.Castclass, paramType); } generator.Emit(OpCodes.Newobj, constructorInfo); generator.Emit(OpCodes.Ret); return (Func<object[], T>)setterMethod.CreateDelegate(typeof(Func<object[], T>)); } private class HashtableWrapper : Hashtable, IEnumerable { private readonly Hashtable table; internal HashtableWrapper(Hashtable table) : base() { this.table = table; } public override int Count => this.table.Count; public override bool IsReadOnly => this.table.IsReadOnly; public override bool IsFixedSize => this.table.IsFixedSize; public override bool IsSynchronized => this.table.IsSynchronized; public override object SyncRoot => this.table.SyncRoot; public override ICollection Keys => this.table.Keys; public override ICollection Values => this.table.Values; public override object this[object key] { get => this.table[key]; set => this.table[key] = value; } public override void Add(object key, object value) { this.table.Add(key, value); } public override void Clear() { this.table.Clear(); } public override bool Contains(object key) { return this.table.Contains(key); } public override bool ContainsKey(object key) { return this.table.ContainsKey(key); } public override bool ContainsValue(object key) { return this.table.ContainsValue(key); } public override void CopyTo(Array array, int arrayIndex) { this.table.CopyTo(array, arrayIndex); } public override object Clone() { return new HashtableWrapper((Hashtable)this.table.Clone()); } IEnumerator IEnumerable.GetEnumerator() { return this.table.GetEnumerator(); } public override IDictionaryEnumerator GetEnumerator() { return this.table.GetEnumerator(); } public override void Remove(object key) { this.table.Remove(key); } } /// <summary> /// Helper class used for ServicePointManager.s_ServicePointTable. 
The goal here is to /// intercept each new ServicePoint object being added to ServicePointManager.s_ServicePointTable /// and replace its ConnectionGroupList hashtable field. /// </summary> private sealed class ServicePointHashtable : HashtableWrapper { public ServicePointHashtable(Hashtable table) : base(table) { } public override object this[object key] { get => base[key]; set { if (value is WeakReference weakRef && weakRef.IsAlive) { if (weakRef.Target is ServicePoint servicePoint) { // Replace the ConnectionGroup hashtable inside this ServicePoint object, // which allows us to intercept each new ConnectionGroup object added under // this ServicePoint. Hashtable originalTable = connectionGroupListField.GetValue(servicePoint) as Hashtable; ConnectionGroupHashtable newTable = new ConnectionGroupHashtable(originalTable ?? new Hashtable()); connectionGroupListField.SetValue(servicePoint, newTable); } } base[key] = value; } } } /// <summary> /// Helper class used for ServicePoint.m_ConnectionGroupList. The goal here is to /// intercept each new ConnectionGroup object being added to ServicePoint.m_ConnectionGroupList /// and replace its m_ConnectionList arraylist field. /// </summary> private sealed class ConnectionGroupHashtable : HashtableWrapper { public ConnectionGroupHashtable(Hashtable table) : base(table) { } public override object this[object key] { get => base[key]; set { if (connectionGroupType.IsInstanceOfType(value)) { // Replace the Connection arraylist inside this ConnectionGroup object, // which allows us to intercept each new Connection object added under // this ConnectionGroup. ArrayList originalArrayList = connectionListField.GetValue(value) as ArrayList; ConnectionArrayList newArrayList = new ConnectionArrayList(originalArrayList ?? new ArrayList()); connectionListField.SetValue(value, newArrayList); } base[key] = value; } } } /// <summary> /// Helper class used to wrap the array list object. This class itself doesn't actually /// have the array elements, but rather access another array list that's given at /// construction time. 
/// </summary> private class ArrayListWrapper : ArrayList { private ArrayList list; internal ArrayListWrapper(ArrayList list) : base() { this.list = list; } public override int Capacity { get => this.list.Capacity; set => this.list.Capacity = value; } public override int Count => this.list.Count; public override bool IsReadOnly => this.list.IsReadOnly; public override bool IsFixedSize => this.list.IsFixedSize; public override bool IsSynchronized => this.list.IsSynchronized; public override object SyncRoot => this.list.SyncRoot; public override object this[int index] { get => this.list[index]; set => this.list[index] = value; } public override int Add(object value) { return this.list.Add(value); } public override void AddRange(ICollection c) { this.list.AddRange(c); } public override int BinarySearch(object value) { return this.list.BinarySearch(value); } public override int BinarySearch(object value, IComparer comparer) { return this.list.BinarySearch(value, comparer); } public override int BinarySearch(int index, int count, object value, IComparer comparer) { return this.list.BinarySearch(index, count, value, comparer); } public override void Clear() { this.list.Clear(); } public override object Clone() { return new ArrayListWrapper((ArrayList)this.list.Clone()); } public override bool Contains(object item) { return this.list.Contains(item); } public override void CopyTo(Array array) { this.list.CopyTo(array); } public override void CopyTo(Array array, int index) { this.list.CopyTo(array, index); } public override void CopyTo(int index, Array array, int arrayIndex, int count) { this.list.CopyTo(index, array, arrayIndex, count); } public override IEnumerator GetEnumerator() { return this.list.GetEnumerator(); } public override IEnumerator GetEnumerator(int index, int count) { return this.list.GetEnumerator(index, count); } public override int IndexOf(object value) { return this.list.IndexOf(value); } public override int IndexOf(object value, int startIndex) { return this.list.IndexOf(value, startIndex); } public override int IndexOf(object value, int startIndex, int count) { return this.list.IndexOf(value, startIndex, count); } public override void Insert(int index, object value) { this.list.Insert(index, value); } public override void InsertRange(int index, ICollection c) { this.list.InsertRange(index, c); } public override int LastIndexOf(object value) { return this.list.LastIndexOf(value); } public override int LastIndexOf(object value, int startIndex) { return this.list.LastIndexOf(value, startIndex); } public override int LastIndexOf(object value, int startIndex, int count) { return this.list.LastIndexOf(value, startIndex, count); } public override void Remove(object value) { this.list.Remove(value); } public override void RemoveAt(int index) { this.list.RemoveAt(index); } public override void RemoveRange(int index, int count) { this.list.RemoveRange(index, count); } public override void Reverse(int index, int count) { this.list.Reverse(index, count); } public override void SetRange(int index, ICollection c) { this.list.SetRange(index, c); } public override ArrayList GetRange(int index, int count) { return this.list.GetRange(index, count); } public override void Sort() { this.list.Sort(); } public override void Sort(IComparer comparer) { this.list.Sort(comparer); } public override void Sort(int index, int count, IComparer comparer) { this.list.Sort(index, count, comparer); } public override object[] ToArray() { return this.list.ToArray(); } public override Array ToArray(Type type) { 
return this.list.ToArray(type); } public override void TrimToSize() { this.list.TrimToSize(); } public ArrayList Swap() { ArrayList old = this.list; this.list = new ArrayList(old.Capacity); return old; } } /// <summary> /// Helper class used for ConnectionGroup.m_ConnectionList. The goal here is to /// intercept each new Connection object being added to ConnectionGroup.m_ConnectionList /// and replace its m_WriteList arraylist field. /// </summary> private sealed class ConnectionArrayList : ArrayListWrapper { public ConnectionArrayList(ArrayList list) : base(list) { } public override int Add(object value) { if (connectionType.IsInstanceOfType(value)) { // Replace the HttpWebRequest arraylist inside this Connection object, // which allows us to intercept each new HttpWebRequest object added under // this Connection. ArrayList originalArrayList = writeListField.GetValue(value) as ArrayList; HttpWebRequestArrayList newArrayList = new HttpWebRequestArrayList(originalArrayList ?? new ArrayList()); writeListField.SetValue(value, newArrayList); } return base.Add(value); } } /// <summary> /// Helper class used for Connection.m_WriteList. The goal here is to /// intercept all new HttpWebRequest objects being added to Connection.m_WriteList /// and notify the listener about the HttpWebRequest that's about to send a request. /// It also intercepts all HttpWebRequest objects that are about to get removed from /// Connection.m_WriteList as they have completed the request. /// </summary> private sealed class HttpWebRequestArrayList : ArrayListWrapper { public HttpWebRequestArrayList(ArrayList list) : base(list) { } public override int Add(object value) { // Add before firing events so if some user code cancels/aborts the request it will be found in the outstanding list. int index = base.Add(value); if (value is HttpWebRequest request) { ProcessRequest(request); } return index; } public override void RemoveAt(int index) { object request = this[index]; base.RemoveAt(index); if (request is HttpWebRequest webRequest) { HookOrProcessResult(webRequest); } } public override void Clear() { ArrayList oldList = this.Swap(); for (int i = 0; i < oldList.Count; i++) { if (oldList[i] is HttpWebRequest request) { HookOrProcessResult(request); } } } } /// <summary> /// A closure object so our state is available when our callback executes. /// </summary> private sealed class AsyncCallbackWrapper { public AsyncCallbackWrapper(HttpWebRequest request, Activity activity, AsyncCallback originalCallback) { this.Request = request; this.Activity = activity; this.OriginalCallback = originalCallback; } public HttpWebRequest Request { get; } public Activity Activity { get; } public AsyncCallback OriginalCallback { get; } public void AsyncCallback(IAsyncResult asyncResult) { object result = resultAccessor(asyncResult); if (result is Exception || result is HttpWebResponse) { ProcessResult(asyncResult, this.OriginalCallback, this.Activity, result, false); } this.OriginalCallback?.Invoke(asyncResult); } } } } #endif
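The InstrumentRequest method in the file above injects W3C trace headers only when absent, and flattens Activity baggage into a comma-separated list of URL-encoded key=value pairs. A minimal Python sketch of that header-building logic, assuming a plain dict for headers and the W3C header names (traceparent, tracestate, Correlation-Context) behind the constants referenced above:

```python
from urllib.parse import quote

def instrument_request(headers, activity_id, trace_state=None, baggage=()):
    """Inject trace context into a mutable headers dict, mirroring
    InstrumentRequest above: never overwrite an existing traceparent."""
    if "traceparent" not in headers:
        headers["traceparent"] = activity_id
        if trace_state is not None:
            headers["tracestate"] = trace_state
    # Baggage becomes "k1=v1,k2=v2" with both keys and values URL-encoded.
    if "Correlation-Context" not in headers and baggage:
        headers["Correlation-Context"] = ",".join(
            quote(k) + "=" + quote(v) for k, v in baggage)
    return headers

# Example: redirects reuse the same request, so the second call is a no-op.
h = instrument_request({}, "00-abc-def-01", baggage=[("user", "a b")])
h = instrument_request(h, "00-other-id-01")
```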
1
14231
Should we define a constant like NETFRAMEWORK or NETFULL that is set for NET452, NET461, and any future targets such as NET47?
open-telemetry-opentelemetry-dotnet
.cs
@@ -36,6 +36,12 @@ const PoStProofLength = 192 // https://github.com/filecoin-project/go-filecoin/issues/966 var ProvingPeriodBlocks = types.NewBlockHeight(20000) +// GracePeriodBlocks is the number of blocks after a proving period over +// which a miner can still submit a post at a penalty. +// TODO: what is a secure value for this? Value is arbitrary right now. +// See https://github.com/filecoin-project/go-filecoin/issues/1887 +var GracePeriodBlocks = types.NewBlockHeight(100) + const ( // ErrPublicKeyTooBig indicates an invalid public key. ErrPublicKeyTooBig = 33
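The patch above adds a grace window after the proving period during which a PoSt is still accepted, at a penalty. A hypothetical Python sketch of the timing rule the patch implies, with plain integer block heights standing in for types.BlockHeight and the values taken from the constants above:

```python
PROVING_PERIOD_BLOCKS = 20000  # from ProvingPeriodBlocks above
GRACE_PERIOD_BLOCKS = 100      # from GracePeriodBlocks in the patch

def post_submission_state(proving_period_start, current_height):
    """Classify a PoSt submission relative to the proving period."""
    period_end = proving_period_start + PROVING_PERIOD_BLOCKS
    if current_height <= period_end:
        return "on-time"
    if current_height <= period_end + GRACE_PERIOD_BLOCKS:
        return "late-with-penalty"  # still accepted, but penalized
    return "too-late"

assert post_submission_state(0, 20000) == "on-time"
assert post_submission_state(0, 20050) == "late-with-penalty"
assert post_submission_state(0, 20101) == "too-late"
```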
1
package miner import ( "math/big" "os" "strconv" "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" cbor "gx/ipfs/QmRoARq3nkUb13HSKZGepCZSWe5GrVPwx7xURJGZ7KWv9V/go-ipld-cbor" xerrors "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer" "github.com/filecoin-project/go-filecoin/abi" "github.com/filecoin-project/go-filecoin/actor" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/exec" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/proofs/sectorbuilder" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/vm/errors" ) func init() { cbor.RegisterCborType(State{}) cbor.RegisterCborType(Ask{}) } // MaximumPublicKeySize is a limit on how big a public key can be. const MaximumPublicKeySize = 100 // PoStProofLength is the length of a single proof-of-spacetime proof (in bytes). const PoStProofLength = 192 // ProvingPeriodBlocks defines how long a proving period is for. // TODO: what is an actual workable value? currently set very high to avoid race conditions in test. // https://github.com/filecoin-project/go-filecoin/issues/966 var ProvingPeriodBlocks = types.NewBlockHeight(20000) const ( // ErrPublicKeyTooBig indicates an invalid public key. ErrPublicKeyTooBig = 33 // ErrInvalidSector indicates and invalid sector id. ErrInvalidSector = 34 // ErrSectorCommitted indicates the sector has already been committed. ErrSectorCommitted = 35 // ErrStoragemarketCallFailed indicates the call to commit the deal failed. ErrStoragemarketCallFailed = 36 // ErrCallerUnauthorized signals an unauthorized caller. ErrCallerUnauthorized = 37 // ErrInsufficientPledge signals insufficient pledge for what you are trying to do. ErrInsufficientPledge = 38 // ErrInvalidPoSt signals that the passed in PoSt was invalid. ErrInvalidPoSt = 39 // ErrAskNotFound indicates that no ask was found with the given ID. ErrAskNotFound = 40 // ErrInvalidSealProof signals that the passed in seal proof was invalid. ErrInvalidSealProof = 41 ) // Errors map error codes to revert errors this actor may return. var Errors = map[uint8]error{ ErrPublicKeyTooBig: errors.NewCodedRevertErrorf(ErrPublicKeyTooBig, "public key must be less than %d bytes", MaximumPublicKeySize), ErrInvalidSector: errors.NewCodedRevertErrorf(ErrInvalidSector, "sectorID out of range"), ErrSectorCommitted: errors.NewCodedRevertErrorf(ErrSectorCommitted, "sector already committed"), ErrStoragemarketCallFailed: errors.NewCodedRevertErrorf(ErrStoragemarketCallFailed, "call to StorageMarket failed"), ErrCallerUnauthorized: errors.NewCodedRevertErrorf(ErrCallerUnauthorized, "not authorized to call the method"), ErrInsufficientPledge: errors.NewCodedRevertErrorf(ErrInsufficientPledge, "not enough pledged"), ErrInvalidPoSt: errors.NewCodedRevertErrorf(ErrInvalidPoSt, "PoSt proof did not validate"), ErrAskNotFound: errors.NewCodedRevertErrorf(ErrAskNotFound, "no ask was found"), ErrInvalidSealProof: errors.NewCodedRevertErrorf(ErrInvalidSealProof, "seal proof was invalid"), } // Actor is the miner actor. // // If `Bootstrap` is `true`, the miner will not verify seal proofs. This is // useful when testing, as miners with non-zero power can be created using bogus // commitments. This is a temporary measure; we want to ultimately be able to // create a real genesis block whose miners are seeded with real commitments. 
// // The `Bootstrap` field must be set to `true` if the miner was created in the // genesis block. If the miner was created in any other block, `Bootstrap` must // be false. type Actor struct { Bootstrap bool } // Ask is a price advertisement by the miner type Ask struct { Price *types.AttoFIL Expiry *types.BlockHeight ID *big.Int } // State is the miner actors storage. type State struct { Owner address.Address // PeerID references the libp2p identity that the miner is operating. PeerID peer.ID // PublicKey is used to validate blocks generated by the miner this actor represents. PublicKey []byte // Pledge is amount the space being offered up by this miner. PledgeSectors *big.Int // Collateral is the total amount of filecoin being held as collateral for // the miners pledge. Collateral *types.AttoFIL // Asks is the set of asks this miner has open Asks []*Ask NextAskID *big.Int // SectorCommitments maps sector id to commitments, for all sectors this // miner has committed. Due to a bug in refmt, the sector id-keys need to be // stringified. // // See also: https://github.com/polydawn/refmt/issues/35 SectorCommitments map[string]types.Commitments LastUsedSectorID uint64 ProvingPeriodStart *types.BlockHeight LastPoSt *types.BlockHeight Power *big.Int } // NewActor returns a new miner actor func NewActor() *actor.Actor { return actor.NewActor(types.MinerActorCodeCid, types.NewZeroAttoFIL()) } // NewState creates a miner state struct func NewState(owner address.Address, key []byte, pledge *big.Int, pid peer.ID, collateral *types.AttoFIL) *State { return &State{ Owner: owner, PeerID: pid, PublicKey: key, PledgeSectors: pledge, Collateral: collateral, SectorCommitments: make(map[string]types.Commitments), Power: big.NewInt(0), NextAskID: big.NewInt(0), } } // InitializeState stores this miner's initial data structure. func (ma *Actor) InitializeState(storage exec.Storage, initializerData interface{}) error { minerState, ok := initializerData.(*State) if !ok { return errors.NewFaultError("Initial state to miner actor is not a miner.State struct") } // TODO: we should validate this is actually a public key (possibly the owner's public key) once we have a better // TODO: idea what crypto looks like. 
if len(minerState.PublicKey) > MaximumPublicKeySize { return Errors[ErrPublicKeyTooBig] } stateBytes, err := cbor.DumpObject(minerState) if err != nil { return xerrors.Wrap(err, "failed to cbor marshal object") } id, err := storage.Put(stateBytes) if err != nil { return err } return storage.Commit(id, cid.Undef) } var _ exec.ExecutableActor = (*Actor)(nil) var minerExports = exec.Exports{ "addAsk": &exec.FunctionSignature{ Params: []abi.Type{abi.AttoFIL, abi.Integer}, Return: []abi.Type{abi.Integer}, }, "getAsks": &exec.FunctionSignature{ Params: nil, Return: []abi.Type{abi.UintArray}, }, "getAsk": &exec.FunctionSignature{ Params: []abi.Type{abi.Integer}, Return: []abi.Type{abi.Bytes}, }, "getOwner": &exec.FunctionSignature{ Params: nil, Return: []abi.Type{abi.Address}, }, "getLastUsedSectorID": &exec.FunctionSignature{ Params: nil, Return: []abi.Type{abi.SectorID}, }, "commitSector": &exec.FunctionSignature{ Params: []abi.Type{abi.SectorID, abi.Bytes, abi.Bytes, abi.Bytes, abi.Bytes}, Return: []abi.Type{}, }, "getKey": &exec.FunctionSignature{ Params: []abi.Type{}, Return: []abi.Type{abi.Bytes}, }, "getPeerID": &exec.FunctionSignature{ Params: []abi.Type{}, Return: []abi.Type{abi.PeerID}, }, "updatePeerID": &exec.FunctionSignature{ Params: []abi.Type{abi.PeerID}, Return: []abi.Type{}, }, "getPledge": &exec.FunctionSignature{ Params: []abi.Type{}, Return: []abi.Type{abi.Integer}, }, "getPower": &exec.FunctionSignature{ Params: []abi.Type{}, Return: []abi.Type{abi.Integer}, }, "submitPoSt": &exec.FunctionSignature{ Params: []abi.Type{abi.Bytes}, Return: []abi.Type{}, }, "getProvingPeriodStart": &exec.FunctionSignature{ Params: []abi.Type{}, Return: []abi.Type{abi.BlockHeight}, }, "getSectorCommitments": &exec.FunctionSignature{ Params: nil, Return: []abi.Type{abi.CommitmentsMap}, }, } // Exports returns the miner actors exported functions. func (ma *Actor) Exports() exec.Exports { return minerExports } // AddAsk adds an ask to this miners ask list func (ma *Actor) AddAsk(ctx exec.VMContext, price *types.AttoFIL, expiry *big.Int) (*big.Int, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { if ctx.Message().From != state.Owner { return nil, Errors[ErrCallerUnauthorized] } id := big.NewInt(0).Set(state.NextAskID) state.NextAskID = state.NextAskID.Add(state.NextAskID, big.NewInt(1)) // filter out expired asks asks := state.Asks state.Asks = state.Asks[:0] for _, a := range asks { if ctx.BlockHeight().LessThan(a.Expiry) { state.Asks = append(state.Asks, a) } } if !expiry.IsUint64() { return nil, errors.NewRevertError("expiry was invalid") } expiryBH := types.NewBlockHeight(expiry.Uint64()) state.Asks = append(state.Asks, &Ask{ Price: price, Expiry: ctx.BlockHeight().Add(expiryBH), ID: id, }) return id, nil }) if err != nil { return nil, errors.CodeError(err), err } askID, ok := out.(*big.Int) if !ok { return nil, 1, errors.NewRevertErrorf("expected an Integer return value from call, but got %T instead", out) } return askID, 0, nil } // GetAsks returns all the asks for this miner. (TODO: this isnt a great function signature, it returns the asks in a // serialized array. 
Consider doing this some other way) func (ma *Actor) GetAsks(ctx exec.VMContext) ([]uint64, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { var askids []uint64 for _, ask := range state.Asks { if !ask.ID.IsUint64() { return nil, errors.NewFaultErrorf("miner ask has invalid ID (bad invariant)") } askids = append(askids, ask.ID.Uint64()) } return askids, nil }) if err != nil { return nil, errors.CodeError(err), err } askids, ok := out.([]uint64) if !ok { return nil, 1, errors.NewRevertErrorf("expected a []uint64 return value from call, but got %T instead", out) } return askids, 0, nil } // GetAsk returns an ask by ID func (ma *Actor) GetAsk(ctx exec.VMContext, askid *big.Int) ([]byte, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { var ask *Ask for _, a := range state.Asks { if a.ID.Cmp(askid) == 0 { ask = a break } } if ask == nil { return nil, Errors[ErrAskNotFound] } out, err := cbor.DumpObject(ask) if err != nil { return nil, err } return out, nil }) if err != nil { return nil, errors.CodeError(err), err } ask, ok := out.([]byte) if !ok { return nil, 1, errors.NewRevertErrorf("expected a Bytes return value from call, but got %T instead", out) } return ask, 0, nil } // GetOwner returns the miners owner. func (ma *Actor) GetOwner(ctx exec.VMContext) (address.Address, uint8, error) { if err := ctx.Charge(100); err != nil { return address.Address{}, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.Owner, nil }) if err != nil { return address.Address{}, errors.CodeError(err), err } a, ok := out.(address.Address) if !ok { return address.Address{}, 1, errors.NewFaultErrorf("expected an Address return value from call, but got %T instead", out) } return a, 0, nil } // GetLastUsedSectorID returns the last used sector id. func (ma *Actor) GetLastUsedSectorID(ctx exec.VMContext) (uint64, uint8, error) { if err := ctx.Charge(100); err != nil { return 0, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.LastUsedSectorID, nil }) if err != nil { return 0, errors.CodeError(err), err } a, ok := out.(uint64) if !ok { return 0, 1, errors.NewFaultErrorf("expected a uint64 sector id, but got %T instead", out) } return a, 0, nil } // GetSectorCommitments returns all sector commitments posted by this miner. func (ma *Actor) GetSectorCommitments(ctx exec.VMContext) (map[string]types.Commitments, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.SectorCommitments, nil }) if err != nil { return map[string]types.Commitments{}, errors.CodeError(err), err } a, ok := out.(map[string]types.Commitments) if !ok { return map[string]types.Commitments{}, 1, errors.NewFaultErrorf("expected a map[string]types.Commitments, but got %T instead", out) } return a, 0, nil } // CommitSector adds a commitment to the specified sector. 
The sector must not // already be committed. func (ma *Actor) CommitSector(ctx exec.VMContext, sectorID uint64, commD, commR, commRStar, proof []byte) (uint8, error) { if err := ctx.Charge(100); err != nil { return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } if len(commD) != int(proofs.CommitmentBytesLen) { return 1, errors.NewRevertError("invalid sized commD") } if len(commR) != int(proofs.CommitmentBytesLen) { return 1, errors.NewRevertError("invalid sized commR") } if len(commRStar) != int(proofs.CommitmentBytesLen) { return 1, errors.NewRevertError("invalid sized commRStar") } if !ma.Bootstrap { // This unfortunate environment variable-checking needs to happen because // the PoRep verification operation needs to know some things (e.g. size) // about the sector for which the proof was generated in order to verify. // // It is undefined behavior for a miner in "Live" mode to verify a proof // created by a miner in "ProofsTest" mode (and vice-versa). // sectorStoreType := proofs.Live if os.Getenv("FIL_USE_SMALL_SECTORS") == "true" { sectorStoreType = proofs.ProofTest } req := proofs.VerifySealRequest{} copy(req.CommD[:], commD) copy(req.CommR[:], commR) copy(req.CommRStar[:], commRStar) copy(req.Proof[:], proof) req.ProverID = sectorbuilder.AddressToProverID(ctx.Message().To) req.SectorID = sectorbuilder.SectorIDToBytes(sectorID) req.StoreType = sectorStoreType res, err := (&proofs.RustVerifier{}).VerifySeal(req) if err != nil { return 1, errors.RevertErrorWrap(err, "failed to verify seal proof") } if !res.IsValid { return ErrInvalidSealProof, Errors[ErrInvalidSealProof] } } // TODO: use uint64 instead of this abomination, once refmt is fixed // https://github.com/polydawn/refmt/issues/35 sectorIDstr := strconv.FormatUint(sectorID, 10) var state State _, err := actor.WithState(ctx, &state, func() (interface{}, error) { // verify that the caller is authorized to perform update if ctx.Message().From != state.Owner { return nil, Errors[ErrCallerUnauthorized] } _, ok := state.SectorCommitments[sectorIDstr] if ok { return nil, Errors[ErrSectorCommitted] } if state.Power.Cmp(big.NewInt(0)) == 0 { state.ProvingPeriodStart = ctx.BlockHeight() } inc := big.NewInt(1) state.Power = state.Power.Add(state.Power, inc) comms := types.Commitments{ CommD: proofs.CommD{}, CommR: proofs.CommR{}, CommRStar: proofs.CommRStar{}, } copy(comms.CommD[:], commD) copy(comms.CommR[:], commR) copy(comms.CommRStar[:], commRStar) state.LastUsedSectorID = sectorID state.SectorCommitments[sectorIDstr] = comms _, ret, err := ctx.Send(address.StorageMarketAddress, "updatePower", nil, []interface{}{inc}) if err != nil { return nil, err } if ret != 0 { return nil, Errors[ErrStoragemarketCallFailed] } return nil, nil }) if err != nil { return errors.CodeError(err), err } return 0, nil } // GetKey returns the public key for this miner. func (ma *Actor) GetKey(ctx exec.VMContext) ([]byte, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State out, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.PublicKey, nil }) if err != nil { return nil, errors.CodeError(err), err } validOut, ok := out.([]byte) if !ok { return nil, 1, errors.NewRevertError("expected a byte slice") } return validOut, 0, nil } // GetPeerID returns the libp2p peer ID that this miner can be reached at. 
func (ma *Actor) GetPeerID(ctx exec.VMContext) (peer.ID, uint8, error) { if err := ctx.Charge(100); err != nil { return peer.ID(""), exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State chunk, err := ctx.ReadStorage() if err != nil { return peer.ID(""), errors.CodeError(err), err } if err := actor.UnmarshalStorage(chunk, &state); err != nil { return peer.ID(""), errors.CodeError(err), err } return state.PeerID, 0, nil } // UpdatePeerID is used to update the peerID this miner is operating under. func (ma *Actor) UpdatePeerID(ctx exec.VMContext, pid peer.ID) (uint8, error) { if err := ctx.Charge(100); err != nil { return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var storage State _, err := actor.WithState(ctx, &storage, func() (interface{}, error) { // verify that the caller is authorized to perform update if ctx.Message().From != storage.Owner { return nil, Errors[ErrCallerUnauthorized] } storage.PeerID = pid return nil, nil }) if err != nil { return errors.CodeError(err), err } return 0, nil } // GetPledge returns the number of pledged sectors func (ma *Actor) GetPledge(ctx exec.VMContext) (*big.Int, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State ret, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.PledgeSectors, nil }) if err != nil { return nil, errors.CodeError(err), err } pledgeSectors, ok := ret.(*big.Int) if !ok { return nil, 1, errors.NewFaultError("Failed to retrieve pledge sectors") } return pledgeSectors, 0, nil } // GetPower returns the amount of proven sectors for this miner. func (ma *Actor) GetPower(ctx exec.VMContext) (*big.Int, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } var state State ret, err := actor.WithState(ctx, &state, func() (interface{}, error) { return state.Power, nil }) if err != nil { return nil, errors.CodeError(err), err } power, ok := ret.(*big.Int) if !ok { return nil, 1, errors.NewFaultErrorf("expected *big.Int to be returned, but got %T instead", ret) } return power, 0, nil } // SubmitPoSt is used to submit a coalesced PoST to the chain to convince the chain // that you have been actually storing the files you claim to be. 
func (ma *Actor) SubmitPoSt(ctx exec.VMContext, proof []byte) (uint8, error) { if err := ctx.Charge(100); err != nil { return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } if len(proof) != PoStProofLength { return 0, errors.NewRevertError("invalid sized proof") } var state State _, err := actor.WithState(ctx, &state, func() (interface{}, error) { // verify that the caller is authorized to perform update if ctx.Message().From != state.Owner { return nil, Errors[ErrCallerUnauthorized] } // reach in to actor storage to grab comm-r for each committed sector var commRs []proofs.CommR for _, v := range state.SectorCommitments { commRs = append(commRs, v.CommR) } // copy message-bytes into PoStProof slice postProof := proofs.PoStProof{} copy(postProof[:], proof) // TODO: use IsPoStValidWithProver when proofs are implemented req := proofs.VerifyPoSTRequest{ ChallengeSeed: proofs.PoStChallengeSeed{}, CommRs: commRs, Faults: []uint64{}, Proof: postProof, } res, err := (&proofs.RustVerifier{}).VerifyPoST(req) if err != nil { return nil, errors.RevertErrorWrap(err, "failed to verify PoSt") } if !res.IsValid { return nil, Errors[ErrInvalidPoSt] } // Check if we submitted it in time provingPeriodEnd := state.ProvingPeriodStart.Add(ProvingPeriodBlocks) if ctx.BlockHeight().LessEqual(provingPeriodEnd) { state.ProvingPeriodStart = provingPeriodEnd state.LastPoSt = ctx.BlockHeight() } else { // Not great. // TODO: charge penalty return nil, errors.NewRevertErrorf("submitted PoSt late, need to pay a fee") } return nil, nil }) if err != nil { return errors.CodeError(err), err } return 0, nil } // GetProvingPeriodStart returns the current ProvingPeriodStart value. func (ma *Actor) GetProvingPeriodStart(ctx exec.VMContext) (*types.BlockHeight, uint8, error) { if err := ctx.Charge(100); err != nil { return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas") } chunk, err := ctx.ReadStorage() if err != nil { return nil, errors.CodeError(err), err } var state State if err := actor.UnmarshalStorage(chunk, &state); err != nil { return nil, errors.CodeError(err), err } return state.ProvingPeriodStart, 0, nil }
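AddAsk in the miner actor above does two pieces of bookkeeping on every call: it drops asks whose expiry has passed, and it stores the new ask with an absolute expiry (current height plus the requested duration) and a fresh monotonic ID. A minimal Python sketch of that bookkeeping, using a dict for state in place of the actor's State struct:

```python
def add_ask(state, price, expiry_blocks, block_height):
    """Mirror of AddAsk's bookkeeping: filter expired asks, then append
    the new ask with an absolute expiry and a fresh monotonic ID."""
    ask_id = state["next_ask_id"]
    state["next_ask_id"] += 1
    # Keep only asks that have not yet expired (an ask survives while
    # the current height is strictly below its expiry, as above).
    state["asks"] = [a for a in state["asks"] if block_height < a["expiry"]]
    state["asks"].append({
        "id": ask_id,
        "price": price,
        "expiry": block_height + expiry_blocks,
    })
    return ask_id

state = {"next_ask_id": 0, "asks": []}
add_ask(state, price=10, expiry_blocks=500, block_height=100)
```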
1
16237
Maybe open an issue to capture the parameters that still need final values, and link to it here?
filecoin-project-venus
go
@@ -288,6 +288,8 @@ class _RunData(object): alternate_services = set() direct_access_sources = set() for backend in backend_service.backends: + if not backend.get('group'): + continue instance_group = self.find_instance_group_by_url( backend.get('group')) if not instance_group:
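The patch above guards against backends that carry no 'group' URL: without it, `instance_group_type.Key.from_url` is effectively called with None. A sketch of the guarded lookup as a helper, assuming the _RunData API shown in the scanner file below (note `is_alternate_service` walks backends with the same unguarded pattern):

```python
def iter_instance_groups(run_data, backend_service):
    """Yield resolvable instance groups for a backend service, skipping
    backends with no 'group' URL (the case the patch guards against)."""
    for backend in backend_service.backends:
        group_url = backend.get('group')
        if not group_url:
            continue  # no group URL recorded for this backend
        instance_group = run_data.find_instance_group_by_url(group_url)
        if instance_group:
            yield instance_group
```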
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Scanner for the Identity-Aware Proxy rules engine.""" import collections from google.cloud.forseti.common.gcp_type import ( backend_service as backend_service_type) from google.cloud.forseti.common.gcp_type import ( firewall_rule as firewall_rule_type) from google.cloud.forseti.common.gcp_type import instance as instance_type from google.cloud.forseti.common.gcp_type import ( instance_group as instance_group_type) from google.cloud.forseti.common.gcp_type import ( instance_group_manager as instance_group_manager_type) from google.cloud.forseti.common.gcp_type import ( instance_template as instance_template_type) from google.cloud.forseti.common.gcp_type import project as project_type from google.cloud.forseti.common.gcp_type import network as network_type from google.cloud.forseti.common.gcp_type.resource import ResourceType from google.cloud.forseti.common.util import logger from google.cloud.forseti.scanner.audit import iap_rules_engine from google.cloud.forseti.scanner.scanners import base_scanner LOGGER = logger.get_logger(__name__) IapResource = collections.namedtuple( 'IapResource', ['project_full_name', 'backend_service', 'alternate_services', 'direct_access_sources', 'iap_enabled'] ) NetworkPort = collections.namedtuple( 'NetworkPort', ['network', 'port']) class _RunData(object): """Information needed to compute IAP properties.""" def __init__(self, backend_services, firewall_rules, instances, instance_groups, instance_group_managers, instance_templates): """Initialize. 
Args: backend_services (list): BackendService firewall_rules (list): FirewallRule instances (list): Instance instance_groups (list): InstanceGroup instance_group_managers (list): InstanceGroupMananger instance_templates (list): InstanceTemplate """ self.resource_counts = { ResourceType.BACKEND_SERVICE: len(backend_services), ResourceType.FIREWALL_RULE: len(firewall_rules), ResourceType.INSTANCE: len(instances), ResourceType.INSTANCE_GROUP: len(instance_groups), ResourceType.INSTANCE_GROUP_MANAGER: len(instance_group_managers), ResourceType.INSTANCE_TEMPLATE: len(instance_templates), } self.backend_services = backend_services self.firewall_rules = firewall_rules self.instances_by_key = dict((instance.key, instance) for instance in instances) self.instance_groups_by_key = dict((instance_group.key, instance_group) for instance_group in instance_groups) self.instance_templates_by_group_key = {} instance_templates_by_key = dict((instance_template.key, instance_template) for instance_template in instance_templates) for instance_group_manager in instance_group_managers: instance_group_url = instance_group_manager.instance_group if not instance_group_url: continue instance_group_key = instance_group_type.Key.from_url( instance_group_url) instance_template_url = instance_group_manager.instance_template instance_template_key = instance_template_type.Key.from_url( instance_template_url) instance_template = instance_templates_by_key.get( instance_template_key) if instance_template: self.instance_templates_by_group_key[ instance_group_key] = instance_template @staticmethod def instance_group_network_port(backend_service, instance_group): """Which network and port is used for a service's backends? A backend service can communicate with its backends on a different network and port number for each of the service's backend instance groups. Args: backend_service (BackendService): service to find port for instance_group (InstanceGroup): group to find port for Returns: NetworkPort: how the service communicates with backends """ # Field 'port' from backend service has been deprecated in favor of # portName. PortName is required when the load balancing scheme is # EXTERNAL. When the load balancing scheme is INTERNAL, this field # is not used, it has the same behavior of port so we can just use # portName to get the port from instance group. port = -1 if backend_service.port: # Although deprecated, it's still returned by the API and might # contain legacy data for customers who have not migrated. port = int(backend_service.port) if backend_service.port_name: for named_port in instance_group.named_ports or []: if named_port.get('name') == backend_service.port_name: port = int(named_port.get('port')) break if port == -1: LOGGER.error('NetworkPort can not be constructed. Unable to ' 'find the appropriate port from backend service ' 'or instance group.') return None return NetworkPort( network=network_type.Key.from_url( instance_group.network, project_id=instance_group.project_id), port=port) def find_instance_group_by_url(self, instance_group_url): """Find an instance group for the given URL. Args: instance_group_url (str): instance group URL Returns: InstanceGroup: instance group """ target_key = instance_group_type.Key.from_url(instance_group_url) return self.instance_groups_by_key.get(target_key) def find_instance_by_url(self, instance_url): """Find an instance for the given URL. 
Args: instance_url (str): instance URL Returns: Instance: instance """ target_key = instance_type.Key.from_url(instance_url) return self.instances_by_key.get(target_key) def firewall_allowed_sources(self, network_port, tag): """Which source (networks, tags) can connect to the given destination? Args: network_port (NetworkPort): connection destination tag (str): instance tag for destination instance Returns: set: allowed source networks and tags """ allowed_sources = set() def firewall_entry_applies(firewall_entry): """Does a firewall entry match the current source? Args: firewall_entry (dict): An 'allowed' or 'denied' dict from a FirewallRule. Returns: bool: whether the entry is relevant to the source being evaluated """ if firewall_entry.get('IPProtocol') not in ( None, 6, '6', 'tcp', 'all'): return False if not firewall_entry.get('ports'): return True for fw_port_range in firewall_entry.get('ports'): fw_port_range = str(fw_port_range) if '-' in fw_port_range: range_ends = fw_port_range.split('-') fw_port_min = int(range_ends[0]) fw_port_max = int(range_ends[1]) else: fw_port_min = int(fw_port_range) fw_port_max = int(fw_port_range) if fw_port_min <= network_port.port <= fw_port_max: return True return False relevant_rules_by_priority = collections.defaultdict(lambda: []) for firewall_rule in self.firewall_rules: firewall_network = network_type.Key.from_url( firewall_rule.network, project_id=firewall_rule.project_id) if firewall_network != network_port.network: continue if (firewall_rule.target_tags and tag not in firewall_rule.target_tags): continue if firewall_rule.direction and firewall_rule.direction != 'INGRESS': continue relevant_rules_by_priority[firewall_rule.priority].append( firewall_rule) priorities = relevant_rules_by_priority.keys() priorities.sort(reverse=True) for priority in priorities: # DENY at a given priority takes precedence over ALLOW for firewall_rule in relevant_rules_by_priority[priority]: for allowed in firewall_rule.allowed or []: if firewall_entry_applies(allowed): allowed_sources.update( firewall_rule.source_ranges or []) allowed_sources.update( firewall_rule.source_tags or []) continue for firewall_rule in relevant_rules_by_priority[priority]: for denied in firewall_rule.denied or []: if firewall_entry_applies(denied): allowed_sources.difference_update( firewall_rule.source_ranges or []) allowed_sources.difference_update( firewall_rule.source_tags or []) return allowed_sources def tags_for_instance_group(self, instance_group): """Which instance tags are used for an instance group? Includes tags used by instances in the group and, for managed groups, tags in the group's template. Args: instance_group (InstanceGroup): the group to query tags for Returns: set: tags """ tags = set() # Get tags from actual instances. for instance_url in instance_group.instance_urls: instance = self.find_instance_by_url(instance_url) if not instance: continue tags.update(instance.tags.get('items', [])) # If it's a managed instance group, also get tags from the # instance template. instance_template = self.instance_templates_by_group_key.get( instance_group.key) if instance_template: template_tags = instance_template.properties.get('tags', {}) tags.update(template_tags.get('items', [])) return tags def make_iap_resource(self, backend_service, project_full_name): """Get an IapResource for a service. Args: backend_service (BackendService): service to create a resource for project_full_name (str): The full path to the parent project including all ancestors. 
Returns: IapResource: the corresponding resource """ alternate_services = set() direct_access_sources = set() for backend in backend_service.backends: instance_group = self.find_instance_group_by_url( backend.get('group')) if not instance_group: continue network_port = self.instance_group_network_port( backend_service, instance_group) if not network_port: continue direct_access_sources.update( self.firewall_allowed_sources(network_port, None)) tags = self.tags_for_instance_group(instance_group) for tag in tags: direct_access_sources.update( self.firewall_allowed_sources( network_port, tag)) # Don't count the load balancer as a direct access source. # The load balancer egress IPs are documented here: # https://cloud.google.com/compute/docs/load-balancing/http/ # (In theory they can change, but it's not common (since it's # a backwards-incompatible change for HTTP load balancer # customers.) 35.191/16 was recently announced; when Google # added that one, they sent out a mandatory service # announcement a year before the new range was used.) direct_access_sources.discard('130.211.0.0/22') direct_access_sources.discard('35.191.0.0/16') for backend_service2 in self.backend_services: if self.is_alternate_service(backend_service, backend_service2): alternate_services.add(backend_service2.key) return IapResource( project_full_name=project_full_name, backend_service=backend_service, alternate_services=alternate_services, direct_access_sources=direct_access_sources, iap_enabled=(backend_service.iap.get('enabled', False) if backend_service.iap else False)) def is_alternate_service(self, backend_service, backend_service2): """Do two backend services expose any of the same (instance, port) ? Args: backend_service (BackendService): One backend service backend_service2 (BackendService): The other backend service Returns: bool: whether the two services share any (instance, port) """ if backend_service2.key == backend_service.key: return False for backend in backend_service.backends: instance_group = self.find_instance_group_by_url( backend.get('group')) if not instance_group: continue network_port = self.instance_group_network_port( backend_service, instance_group) if not network_port: continue for backend2 in backend_service2.backends: instance_group2 = self.find_instance_group_by_url( backend2.get('group')) if not instance_group2: continue network_port2 = self.instance_group_network_port( backend_service2, instance_group2) if not network_port2: continue if network_port != network_port2: continue if instance_group == instance_group2: return True for instance_url in instance_group.instance_urls: if instance_url in instance_group2.instance_urls: return True return False class IapScanner(base_scanner.BaseScanner): """Pipeline to IAP-related data from DAO.""" SCANNER_OUTPUT_CSV_FMT = 'scanner_output_iap.{}.csv' def __init__(self, global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules): """Initialization. Args: global_configs (dict): Global configurations. scanner_configs (dict): Scanner configurations. service_config (ServiceConfig): Forseti 2.0 service configs model_name (str): name of the data model snapshot_timestamp (str): The snapshot timestamp. rules (str): Fully-qualified path and filename of the rules file. 
""" super(IapScanner, self).__init__( global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules) self.rules_engine = iap_rules_engine.IapRulesEngine( rules_file_path=self.rules, snapshot_timestamp=self.snapshot_timestamp) self.rules_engine.build_rule_book(self.global_configs) self.scoped_session, self.data_access = ( service_config.model_manager.get(model_name)) @staticmethod def _flatten_violations(violations): """Flatten RuleViolations into a dict for each RuleViolation member. Args: violations (list): The RuleViolations to flatten. Yields: dict: Iterator of RuleViolations as a dict per member. """ for violation in violations: alternate_services = ['%s/%s' % (bs_key.project_id, bs_key.name) for bs_key in violation.alternate_services_violations] alternate_services.sort() alternate_services_str = ', '.join(alternate_services) direct_access_sources = violation.direct_access_sources_violations direct_access_sources.sort() direct_access_str = ', '.join(direct_access_sources) violation_data = { 'alternate_services_violations': alternate_services_str, 'direct_access_sources_violations': direct_access_str, 'iap_enabled_violation': str(violation.iap_enabled_violation), 'resource_name': violation.resource_name } yield { 'resource_id': violation.resource_id, 'resource_name': violation.resource_name, 'resource_type': violation.resource_type, 'full_name': violation.full_name, 'rule_index': violation.rule_index, 'rule_name': violation.rule_name, 'violation_type': violation.violation_type, 'violation_data': violation_data, 'resource_data': violation.resource_data } def _output_results(self, all_violations): """Output results. Args: all_violations (list): A list of violations. """ all_violations = self._flatten_violations(all_violations) self._output_results_to_db(all_violations) def _get_backend_services(self, parent_type_name): """Retrieves backend services. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: BackendService """ backend_services = [] with self.scoped_session as session: for backend_service in self.data_access.scanner_iter( session, 'backendservice', parent_type_name=parent_type_name): backend_services.append( backend_service_type.BackendService.from_json( full_name=backend_service.full_name, project_id=backend_service.parent.name, json_string=backend_service.data)) return backend_services def _get_firewall_rules(self, parent_type_name): """Retrieves firewall rules. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: FirewallRule """ firewall_rules = [] with self.scoped_session as session: for firewall_rule in self.data_access.scanner_iter( session, 'firewall', parent_type_name=parent_type_name): firewall_rules.append( firewall_rule_type.FirewallRule.from_json( project_id=firewall_rule.parent.name, json_string=firewall_rule.data)) return firewall_rules def _get_instances(self, parent_type_name): """Retrieves instances. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: Instance """ instances = [] with self.scoped_session as session: for instance in self.data_access.scanner_iter( session, 'instance', parent_type_name=parent_type_name): project = project_type.Project( project_id=instance.parent.name, full_name=instance.parent.full_name, ) instances.append( instance_type.Instance.from_json( parent=project, json_string=instance.data)) return instances def _get_instance_groups(self, parent_type_name): """Retrieves instance groups. 
Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceGroup """ instance_groups = [] with self.scoped_session as session: for instance_group in self.data_access.scanner_iter( session, 'instancegroup', parent_type_name=parent_type_name): instance_groups.append( instance_group_type.InstanceGroup.from_json( project_id=instance_group.parent.name, json_string=instance_group.data)) return instance_groups def _get_instance_group_managers(self, parent_type_name): """Retrieves instance group managers. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceGroupManager """ instance_group_managers = [] with self.scoped_session as session: for instance_group_manager in self.data_access.scanner_iter( session, 'instancegroupmanager', parent_type_name=parent_type_name): instance_group_managers.append( instance_group_manager_type.InstanceGroupManager.from_json( project_id=instance_group_manager.parent.name, json_string=instance_group_manager.data)) return instance_group_managers def _get_instance_templates(self, parent_type_name): """Retrieves instance templates. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceTemplate """ instance_templates = [] with self.scoped_session as session: for instance_template in self.data_access.scanner_iter( session, 'instancetemplate', parent_type_name=parent_type_name): instance_templates.append( instance_template_type.InstanceTemplate.from_json( project_id=instance_template.parent.name, json_string=instance_template.data)) return instance_templates def _retrieve(self): """Retrieves the data for the scanner. Yields: list: A list of IAP Resources for a project, to pass to the rules engine dict: A dict of resource counts for the project. """ projects = [] with self.scoped_session as session: for project in self.data_access.scanner_iter(session, 'project'): projects.append(project) for parent in projects: backend_services = self._get_backend_services(parent.type_name) firewall_rules = self._get_firewall_rules(parent.type_name) instances = self._get_instances(parent.type_name) instance_groups = self._get_instance_groups(parent.type_name) instance_group_managers = self._get_instance_group_managers( parent.type_name) instance_templates = self._get_instance_templates(parent.type_name) run_data = _RunData( backend_services=backend_services, firewall_rules=firewall_rules, instances=instances, instance_groups=instance_groups, instance_group_managers=instance_group_managers, instance_templates=instance_templates) iap_resources = [] for backend in backend_services: iap_resources.append( run_data.make_iap_resource(backend, parent.full_name)) yield iap_resources, run_data.resource_counts def _find_violations(self, iap_data): """Find IAP violations. Args: iap_data (iter): Generator of IAP resources and resource counts per project in the inventory. 
Returns: list: RuleViolation """ LOGGER.info('Finding IAP violations with %r...', self.rules_engine) ret = [] resource_counts = collections.defaultdict(int) for (iap_resources, project_resource_counts) in iap_data: for iap_resource in iap_resources: ret.extend(self.rules_engine.find_violations(iap_resource)) for key, value in project_resource_counts.items(): resource_counts[key] += value LOGGER.debug('find_violations returning %r', ret) return ret, dict(resource_counts) def run(self): """Runs the data collection.""" LOGGER.debug('In run') iap_data = self._retrieve() all_violations, _ = self._find_violations(iap_data) self._output_results(all_violations)
1
32,822
Can we instead do the check in find_instance_group_by_url()? If it returns None when the group is not found, the caller will hit the next if statement (`if not instance_group`) and continue as expected; that way we don't need to add 3 separate if statements. (See the sketch after this record.)
forseti-security-forseti-security
py
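A minimal Python sketch of the refactor the reviewer suggests. The class and attribute names here (`RunData`, `instance_groups_by_key`) are illustrative stand-ins, not the actual Forseti code: the idea is simply that `find_instance_group_by_url()` returns `None` for a falsy or unknown URL, so each call site's existing `if not instance_group: continue` guard absorbs the not-found case without extra checks.

```python
class RunData(object):
    """Stand-in for the scanner's run data; only the lookup is sketched."""

    def __init__(self, instance_groups_by_key):
        self.instance_groups_by_key = instance_groups_by_key

    def find_instance_group_by_url(self, instance_group_url):
        """Return the matching group, or None if the URL is falsy or unknown."""
        if not instance_group_url:
            return None
        # The real code would derive a key via InstanceGroup.Key.from_url();
        # the raw URL stands in for that key in this sketch.
        return self.instance_groups_by_key.get(instance_group_url)


run_data = RunData({'group-a': 'the-group'})
for backend in [{'group': 'group-a'}, {}, {'group': 'missing'}]:
    instance_group = run_data.find_instance_group_by_url(backend.get('group'))
    if not instance_group:
        continue  # absent or unknown group: skip, no separate if needed
    print(instance_group)
```

With this shape, the three call sites in the scanner keep their one-line guard and the None-handling lives in a single place.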
@@ -85,15 +85,15 @@ public class DateUtil { } public static int toSeconds(String hh_colon_mm) { - Pattern p = Pattern.compile("(\\d+):(\\d+)( a.m.| p.m.|)"); + Pattern p = Pattern.compile("(\\d+):(\\d+)( a.m.| p.m.| AM | PM)"); Matcher m = p.matcher(hh_colon_mm); int retval = 0; if (m.find()) { retval = SafeParse.stringToInt(m.group(1)) * 60 * 60 + SafeParse.stringToInt(m.group(2)) * 60; - if (m.group(3).equals(" .a.m") && m.group(1).equals("12")) + if (m.group(3).equals(" AM") && m.group(1).equals("12")) retval -= 12 * 60 * 60; - if (m.group(3).equals(" p.m.") && !m.group(1).equals("12")) + if (m.group(3).equals(" PM") && !(m.group(1).equals("12"))) retval += 12 * 60 * 60; } return retval;
1
package info.nightscout.utils; import android.text.format.DateUtils; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; import info.nightscout.androidaps.MainApp; import info.nightscout.androidaps.R; /** * The Class DateUtil. A simple wrapper around SimpleDateFormat to ease the handling of iso date string &lt;-&gt; date obj * with TZ */ public class DateUtil { /** * The date format in iso. */ public static String FORMAT_DATE_ISO = "yyyy-MM-dd'T'HH:mm:ss'Z'"; public static String FORMAT_DATE_ISO_MSEC = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"; /** * Takes in an ISO date string of the following format: * yyyy-mm-ddThh:mm:ss.ms+HoMo * * @param isoDateString the iso date string * @return the date * @throws Exception the exception */ public static Date fromISODateString(String isoDateString) throws Exception { SimpleDateFormat f = new SimpleDateFormat(FORMAT_DATE_ISO); Date date; f.setTimeZone(TimeZone.getTimeZone("UTC")); try { date = f.parse(isoDateString); } catch (ParseException e) { f = new SimpleDateFormat(FORMAT_DATE_ISO_MSEC); f.setTimeZone(TimeZone.getTimeZone("UTC")); date = f.parse(isoDateString); } return date; } /** * Render date * * @param date the date obj * @param format - if not specified, will use FORMAT_DATE_ISO * @param tz - tz to set to, if not specified uses local timezone * @return the iso-formatted date string */ public static String toISOString(Date date, String format, TimeZone tz) { if (format == null) format = FORMAT_DATE_ISO; if (tz == null) tz = TimeZone.getDefault(); DateFormat f = new SimpleDateFormat(format); f.setTimeZone(tz); return f.format(date); } public static String toISOString(Date date) { return toISOString(date, FORMAT_DATE_ISO, TimeZone.getTimeZone("UTC")); } public static String toISOString(long date) { return toISOString(new Date(date), FORMAT_DATE_ISO, TimeZone.getTimeZone("UTC")); } public static Date toDate(Integer seconds) { Calendar calendar = new GregorianCalendar(); calendar.set(Calendar.HOUR_OF_DAY, seconds / 60 / 60); String a = calendar.getTime().toString(); calendar.set(Calendar.MINUTE, (seconds / 60) % 60); String b = calendar.getTime().toString(); calendar.set(Calendar.SECOND, 0); String c = calendar.getTime().toString(); return calendar.getTime(); } public static int toSeconds(String hh_colon_mm) { Pattern p = Pattern.compile("(\\d+):(\\d+)( a.m.| p.m.|)"); Matcher m = p.matcher(hh_colon_mm); int retval = 0; if (m.find()) { retval = SafeParse.stringToInt(m.group(1)) * 60 * 60 + SafeParse.stringToInt(m.group(2)) * 60; if (m.group(3).equals(" .a.m") && m.group(1).equals("12")) retval -= 12 * 60 * 60; if (m.group(3).equals(" p.m.") && !m.group(1).equals("12")) retval += 12 * 60 * 60; } return retval; } public static String dateString(Date date) { //return DateUtils.formatDateTime(MainApp.instance(), date.getTime(), DateUtils.FORMAT_SHOW_DATE); this provide month name not number DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT); return df.format(date); } public static String dateString(long mills) { //return DateUtils.formatDateTime(MainApp.instance(), mills, DateUtils.FORMAT_SHOW_DATE); this provide month name not number DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT); return df.format(mills); } public static String timeString(Date date) { return DateUtils.formatDateTime(MainApp.instance(), date.getTime(), 
DateUtils.FORMAT_SHOW_TIME); } public static String timeString(long mills) { return DateUtils.formatDateTime(MainApp.instance(), mills, DateUtils.FORMAT_SHOW_TIME); } public static String dateAndTimeString(Date date) { return dateString(date) + " " + timeString(date); } public static String dateAndTimeString(long mills) { return dateString(mills) + " " + timeString(mills); } public static String minAgo(long time) { int mins = (int) ((System.currentTimeMillis() - time) / 1000 / 60); return String.format(MainApp.sResources.getString(R.string.minago), mins); } }
1
29,576
On my Android, ".a.m" and ".p.m" are used; add `||` here to support both variants. (See the sketch after this record.)
MilosKozak-AndroidAPS
java
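A small Python sketch of the reviewer's point; in the Java code the fix would simply OR the two `equals()` checks with `||`. The regex alternation and the two-element membership tests below accept both the " a.m."/" p.m." and " AM"/" PM" spellings. This is an illustrative port of `toSeconds()`, not the project's actual code.

```python
import re


def to_seconds(hh_colon_mm):
    """Parse 'hh:mm' with an optional meridiem in either spelling."""
    m = re.search(r"(\d+):(\d+)( a\.m\.| p\.m\.| AM| PM|)", hh_colon_mm)
    if not m:
        return 0
    seconds = int(m.group(1)) * 3600 + int(m.group(2)) * 60
    meridiem = m.group(3)
    # 12 a.m. is midnight, so subtract 12 hours; p.m. adds 12 hours
    # except for 12 p.m. itself. Both spellings are accepted.
    if meridiem in (" a.m.", " AM") and m.group(1) == "12":
        seconds -= 12 * 3600
    if meridiem in (" p.m.", " PM") and m.group(1) != "12":
        seconds += 12 * 3600
    return seconds


print(to_seconds("12:30 AM"))   # 1800
print(to_seconds("1:15 p.m."))  # 47700
```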
@@ -4647,7 +4647,7 @@ bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageVi skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01586", "%s", ss.str().c_str()); } } else { - if ((!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR)) || (!FormatIsMultiplane(image_format))) { + if (!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR)) { // Format MUST be compatible (in the same format compatibility class) as the format the image was created with if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) { const char *error_vuid;
1
/* Copyright (c) 2015-2020 The Khronos Group Inc. * Copyright (c) 2015-2020 Valve Corporation * Copyright (c) 2015-2020 LunarG, Inc. * Copyright (C) 2015-2020 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Mark Lobodzinski <[email protected]> * Author: Dave Houlton <[email protected]> * Shannon McPherson <[email protected]> */ #include <cmath> #include <set> #include <sstream> #include <string> #include "vk_enum_string_helper.h" #include "vk_format_utils.h" #include "vk_layer_data.h" #include "vk_layer_utils.h" #include "vk_layer_logging.h" #include "vk_typemap_helper.h" #include "chassis.h" #include "core_validation.h" #include "shader_validation.h" #include "descriptor_sets.h" #include "buffer_validation.h" // Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) { VkImageSubresourceRange subresource_range; subresource_range.aspectMask = subresource_layers.aspectMask; subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer; subresource_range.layerCount = subresource_layers.layerCount; subresource_range.baseMipLevel = subresource_layers.mipLevel; subresource_range.levelCount = 1; return subresource_range; } static VkImageSubresourceRange MakeImageFullRange(const VkImageCreateInfo &create_info) { const auto format = create_info.format; VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; #ifdef VK_USE_PLATFORM_ANDROID_KHR const VkExternalFormatANDROID *pExternalFormatANDROID = lvl_find_in_chain<VkExternalFormatANDROID>(&create_info); bool isExternalFormatConversion = (pExternalFormatANDROID != nullptr && pExternalFormatANDROID->externalFormat != 0); #else bool isExternalFormatConversion = false; #endif if (FormatIsColor(format) || FormatIsMultiplane(format) || isExternalFormatConversion) { init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane } else { init_range.aspectMask = (FormatHasDepth(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) | (FormatHasStencil(format) ? VK_IMAGE_ASPECT_STENCIL_BIT : 0); } return NormalizeSubresourceRange(create_info, init_range); } std::vector<VkImageView> FRAMEBUFFER_STATE::GetUsedAttachments( const safe_VkSubpassDescription2 &subpasses, const std::vector<IMAGE_VIEW_STATE *> &imagelessFramebufferAttachments) { std::vector<VkImageView> attachment_views(createInfo.attachmentCount, VK_NULL_HANDLE); const bool imageless = (createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) ? 
true : false; for (uint32_t index = 0; index < subpasses.inputAttachmentCount; ++index) { const uint32_t attachment_index = subpasses.pInputAttachments[index].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { if (imageless) { attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view; } else { attachment_views[attachment_index] = createInfo.pAttachments[attachment_index]; } } } for (uint32_t index = 0; index < subpasses.colorAttachmentCount; ++index) { const uint32_t attachment_index = subpasses.pColorAttachments[index].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { if (imageless) { attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view; } else { attachment_views[attachment_index] = createInfo.pAttachments[attachment_index]; } } if (subpasses.pResolveAttachments) { const uint32_t attachment_index2 = subpasses.pResolveAttachments[index].attachment; if (attachment_index2 != VK_ATTACHMENT_UNUSED) { if (imageless) { attachment_views[attachment_index2] = imagelessFramebufferAttachments[attachment_index2]->image_view; } else { attachment_views[attachment_index2] = createInfo.pAttachments[attachment_index2]; } } } } if (subpasses.pDepthStencilAttachment) { const uint32_t attachment_index = subpasses.pDepthStencilAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { if (imageless) { attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view; } else { attachment_views[attachment_index] = createInfo.pAttachments[attachment_index]; } } } return attachment_views; } IMAGE_STATE::IMAGE_STATE(VkDevice dev, VkImage img, const VkImageCreateInfo *pCreateInfo) : image(img), safe_create_info(pCreateInfo), createInfo(*safe_create_info.ptr()), valid(false), acquired(false), shared_presentable(false), layout_locked(false), get_sparse_reqs_called(false), sparse_metadata_required(false), sparse_metadata_bound(false), has_ahb_format(false), is_swapchain_image(false), ahb_format(0), full_range{MakeImageFullRange(createInfo)}, create_from_swapchain(VK_NULL_HANDLE), bind_swapchain(VK_NULL_HANDLE), bind_swapchain_imageIndex(0), range_encoder(full_range), disjoint(false), plane0_memory_requirements_checked(false), plane1_memory_requirements_checked(false), plane2_memory_requirements_checked(false), subresource_encoder(full_range), fragment_encoder(nullptr), store_device_as_workaround(dev), // TODO REMOVE WHEN encoder can be const sparse_requirements{} { if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) { uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount]; for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) { pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i]; } createInfo.pQueueFamilyIndices = pQueueFamilyIndices; } if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) { sparse = true; } auto *externalMemoryInfo = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext); if (externalMemoryInfo) { external_memory_handle = externalMemoryInfo->handleTypes; } } bool IMAGE_STATE::IsCreateInfoEqual(const VkImageCreateInfo &other_createInfo) const { bool is_equal = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags); is_equal = is_equal && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo); is_equal = is_equal && IsMipLevelsEqual(other_createInfo) && IsArrayLayersEqual(other_createInfo); 
is_equal = is_equal && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo); is_equal = is_equal && IsExtentEqual(other_createInfo) && IsTilingEqual(other_createInfo); is_equal = is_equal && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo); return is_equal && ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true); } // Check image compatibility rules for VK_NV_dedicated_allocation_image_aliasing bool IMAGE_STATE::IsCreateInfoDedicatedAllocationImageAliasingCompatible(const VkImageCreateInfo &other_createInfo) const { bool is_compatible = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags); is_compatible = is_compatible && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo); is_compatible = is_compatible && IsMipLevelsEqual(other_createInfo); is_compatible = is_compatible && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo); is_compatible = is_compatible && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo); is_compatible = is_compatible && ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true); is_compatible = is_compatible && IsTilingEqual(other_createInfo); is_compatible = is_compatible && createInfo.extent.width <= other_createInfo.extent.width && createInfo.extent.height <= other_createInfo.extent.height && createInfo.extent.depth <= other_createInfo.extent.depth && createInfo.arrayLayers <= other_createInfo.arrayLayers; return is_compatible; } bool IMAGE_STATE::IsCompatibleAliasing(IMAGE_STATE *other_image_state) { if (!is_swapchain_image && !other_image_state->is_swapchain_image && !(createInfo.flags & other_image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT)) return false; if ((create_from_swapchain == VK_NULL_HANDLE) && binding.mem_state && (binding.mem_state == other_image_state->binding.mem_state) && (binding.offset == other_image_state->binding.offset) && IsCreateInfoEqual(other_image_state->createInfo)) { return true; } if ((bind_swapchain == other_image_state->bind_swapchain) && (bind_swapchain != VK_NULL_HANDLE)) { return true; } return false; } IMAGE_VIEW_STATE::IMAGE_VIEW_STATE(const std::shared_ptr<IMAGE_STATE> &im, VkImageView iv, const VkImageViewCreateInfo *ci) : image_view(iv), create_info(*ci), normalized_subresource_range(NormalizeSubresourceRange(*im, ci->subresourceRange)), range_generator(im->subresource_encoder, normalized_subresource_range), samplerConversion(VK_NULL_HANDLE), image_state(im) { auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info.pNext); if (conversionInfo) samplerConversion = conversionInfo->conversion; if (image_state) { // A light normalization of the createInfo range auto &sub_res_range = create_info.subresourceRange; sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels); sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers); // Cache a full normalization (for "full image/whole image" comparisons) // normalized_subresource_range = NormalizeSubresourceRange(*image_state, ci->subresourceRange); samples = image_state->createInfo.samples; if (image_state->has_ahb_format) { // When the image has a external format the views format must be VK_FORMAT_UNDEFINED and it is required to use a sampler // Ycbcr conversion. 
Thus we can't extract any meaningful information from the format parameter. As a Sampler Ycbcr // conversion must be used the shader type is always float. descriptor_format_bits = DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT; } else { descriptor_format_bits = DescriptorRequirementsBitsFromFormat(create_info.format); } } } bool IMAGE_VIEW_STATE::OverlapSubresource(const IMAGE_VIEW_STATE &compare_view) const { if (image_view == compare_view.image_view) { return true; } if (image_state->image != compare_view.image_state->image) { return false; } if (normalized_subresource_range.aspectMask != compare_view.normalized_subresource_range.aspectMask) { return false; } // compare if overlap mip level if ((normalized_subresource_range.baseMipLevel < compare_view.normalized_subresource_range.baseMipLevel) && ((normalized_subresource_range.baseMipLevel + normalized_subresource_range.levelCount) <= compare_view.normalized_subresource_range.baseMipLevel)) { return false; } if ((normalized_subresource_range.baseMipLevel > compare_view.normalized_subresource_range.baseMipLevel) && (normalized_subresource_range.baseMipLevel >= (compare_view.normalized_subresource_range.baseMipLevel + compare_view.normalized_subresource_range.levelCount))) { return false; } // compare if overlap array layer if ((normalized_subresource_range.baseArrayLayer < compare_view.normalized_subresource_range.baseArrayLayer) && ((normalized_subresource_range.baseArrayLayer + normalized_subresource_range.layerCount) <= compare_view.normalized_subresource_range.baseArrayLayer)) { return false; } if ((normalized_subresource_range.baseArrayLayer > compare_view.normalized_subresource_range.baseArrayLayer) && (normalized_subresource_range.baseArrayLayer >= (compare_view.normalized_subresource_range.baseArrayLayer + compare_view.normalized_subresource_range.layerCount))) { return false; } return true; } uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) { // uint cast applies floor() return 1u + (uint32_t)log2(std::max({height, width, depth})); } uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); } uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); } bool CoreChecks::FindLayouts(VkImage image, std::vector<VkImageLayout> &layouts) const { auto image_state = GetImageState(image); if (!image_state) return false; const auto *layout_range_map = GetLayoutRangeMap(imageLayoutMap, image); if (!layout_range_map) return false; // TODO: FindLayouts function should mutate into a ValidatePresentableLayout with the loop wrapping the LogError // from the caller. You can then use decode to add the subresource of the range::begin to the error message. // TODO: what is this test and what is it supposed to do?! -- the logic doesn't match the comment below?! // TODO: Make this robust for >1 aspect mask. Now it will just say ignore potential errors in this case. 
if (layout_range_map->size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) { return false; } for (auto entry : *layout_range_map) { layouts.push_back(entry.second); } return true; } // Set image layout for given VkImageSubresourceRange struct void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state, const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout, VkImageLayout expected_layout) { auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state); assert(subresource_map); // the non-const getter must return a valid pointer if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) { cb_node->image_layout_change_count++; // Change the version of this data to force revalidation } for (const auto &image : image_state.aliasing_images) { auto alias_state = GetImageState(image); // The map state of the aliases should all be in sync, so no need to check the return value subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state); assert(subresource_map); subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout); } } // Set the initial image layout for all slices of an image view void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) { if (disabled[image_layout_validation]) { return; } IMAGE_STATE *image_state = view_state.image_state.get(); auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state); subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state); for (const auto &image : image_state->aliasing_images) { image_state = GetImageState(image); subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state); subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state); } } // Set the initial image layout for a passed non-normalized subresource range void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state, const VkImageSubresourceRange &range, VkImageLayout layout) { auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state); assert(subresource_map); subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(image_state, range), layout); for (const auto &image : image_state.aliasing_images) { auto alias_state = GetImageState(image); subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state); assert(subresource_map); subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(*alias_state, range), layout); } } void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range, VkImageLayout layout) { const IMAGE_STATE *image_state = GetImageState(image); if (!image_state) return; SetImageInitialLayout(cb_node, *image_state, range, layout); }; void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state, const VkImageSubresourceLayers &layers, VkImageLayout layout) { SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout); } // Set image layout for all slices of an image view void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout, VkImageLayout layoutStencil) { IMAGE_STATE *image_state = view_state.image_state.get(); VkImageSubresourceRange sub_range = 
view_state.normalized_subresource_range; // When changing the layout of a 3D image subresource via a 2D or 2D_ARRRAY image view, all depth slices of // the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info. if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) { sub_range.baseArrayLayer = 0; sub_range.layerCount = image_state->createInfo.extent.depth; } if (sub_range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) && layoutStencil != kInvalidLayout) { sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; SetImageLayout(cb_node, *image_state, sub_range, layout); sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; SetImageLayout(cb_node, *image_state, sub_range, layoutStencil); } else { SetImageLayout(cb_node, *image_state, sub_range, layout); } } bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout, VkImage image, VkImageView image_view, VkFramebuffer framebuffer, VkRenderPass renderpass, uint32_t attachment_index, const char *variable_name) const { bool skip = false; auto image_state = GetImageState(image); const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()"; if (!image_state) { LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(image, "VUID-VkRenderPassBeginInfo-framebuffer-parameter", "%s: RenderPass %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image", function_name, report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(), attachment_index, report_data->FormatHandle(image_view).c_str()); return skip; } auto image_usage = image_state->createInfo.usage; const auto stencil_usage_info = lvl_find_in_chain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext); if (stencil_usage_info) { image_usage |= stencil_usage_info->stencilUsage; } // Check for layouts that mismatch image usages in the framebuffer if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) { vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895"; LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, vuid, "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL && !(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) { vuid = use_rp2 ? 
"VUID-vkCmdBeginRenderPass2-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897"; LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, vuid, "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) { vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898"; LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, vuid, "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899"; LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, vuid, "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } if (device_extensions.vk_khr_maintenance2) { if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) && !(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758"; LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, vuid, "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } } else { // The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here. 
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) && !(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { LogObjectList objlist(image); objlist.add(renderpass); objlist.add(framebuffer); objlist.add(image_view); skip |= LogError(objlist, "VUID-vkCmdBeginRenderPass-initialLayout-00896", "%s: Layout/usage mismatch for attachment %u in %s" " - the %s is %s but the image attached to %s via %s" " was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT", function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } } return skip; } bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin, const FRAMEBUFFER_STATE *framebuffer_state) const { bool skip = false; auto const pRenderPassInfo = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr(); auto const &framebufferInfo = framebuffer_state->createInfo; const VkImageView *attachments = framebufferInfo.pAttachments; auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass; auto framebuffer = framebuffer_state->framebuffer; if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) { skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidRenderpass, "You cannot start a render pass using a framebuffer with a different number of attachments."); } const auto *attachmentInfo = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext); if (((framebufferInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) && attachmentInfo != nullptr) { attachments = attachmentInfo->pAttachments; } if (attachments != nullptr) { const auto *const_pCB = static_cast<const CMD_BUFFER_STATE *>(pCB); for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { auto image_view = attachments[i]; auto view_state = GetImageViewState(image_view); if (!view_state) { LogObjectList objlist(pRenderPassBegin->renderPass); objlist.add(framebuffer_state->framebuffer); objlist.add(image_view); skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter", "vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle", report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i, report_data->FormatHandle(image_view).c_str()); continue; } const VkImage image = view_state->create_info.image; const IMAGE_STATE *image_state = GetImageState(image); if (!image_state) { LogObjectList objlist(pRenderPassBegin->renderPass); objlist.add(framebuffer_state->framebuffer); objlist.add(image_view); objlist.add(image); skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter", "vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.", report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i, report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str()); continue; } auto attachment_initial_layout = pRenderPassInfo->pAttachments[i].initialLayout; auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout; // Default to expecting stencil in the same layout. auto attachment_stencil_initial_layout = attachment_initial_layout; // If a separate layout is specified, look for that. 
const auto *attachment_description_stencil_layout = lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(pRenderPassInfo->pAttachments[i].pNext); if (attachment_description_stencil_layout) { attachment_stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout; } // Cast pCB to const because we don't want to create entries that don't exist here (in case the key changes to something // in common with the non-const version.) const ImageSubresourceLayoutMap *subresource_map = (attachment_initial_layout != VK_IMAGE_LAYOUT_UNDEFINED) ? GetImageSubresourceLayoutMap(const_pCB, image) : nullptr; if (subresource_map) { // If no layout information for image yet, will be checked at QueueSubmit time LayoutUseCheckAndMessage layout_check(subresource_map); bool subres_skip = false; auto pos = subresource_map->Find(view_state->normalized_subresource_range); for (; pos != subresource_map->End() && !subres_skip; ++pos) { const VkImageSubresource &subres = pos->subresource; // Allow for differing depth and stencil layouts VkImageLayout check_layout = attachment_initial_layout; if (subres.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) check_layout = attachment_stencil_initial_layout; if (!layout_check.Check(subres, check_layout, pos->current_layout, pos->initial_layout)) { subres_skip |= LogError( device, kVUID_Core_DrawState_InvalidRenderpass, "You cannot start a render pass using attachment %u where the render pass initial layout is %s " "and the %s layout of the attachment is %s. The layouts must match, or the render " "pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED", i, string_VkImageLayout(check_layout), layout_check.message, string_VkImageLayout(layout_check.layout)); } } skip |= subres_skip; } ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view, framebuffer, render_pass, i, "initial layout"); ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer, render_pass, i, "final layout"); } for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) { auto &subpass = pRenderPassInfo->pSubpasses[j]; for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) { auto &attachment_ref = subpass.pInputAttachments[k]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { auto image_view = attachments[attachment_ref.attachment]; auto view_state = GetImageViewState(image_view); if (view_state) { auto image = view_state->create_info.image; ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "input attachment layout"); } } } for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) { auto &attachment_ref = subpass.pColorAttachments[k]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { auto image_view = attachments[attachment_ref.attachment]; auto view_state = GetImageViewState(image_view); if (view_state) { auto image = view_state->create_info.image; ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "color attachment layout"); if (subpass.pResolveAttachments) { ValidateRenderPassLayoutAgainstFramebufferImageUsage( rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "resolve attachment layout"); } } } } if 
(pRenderPassInfo->pSubpasses[j].pDepthStencilAttachment) { auto &attachment_ref = *subpass.pDepthStencilAttachment; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { auto image_view = attachments[attachment_ref.attachment]; auto view_state = GetImageViewState(image_view); if (view_state) { auto image = view_state->create_info.image; ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "input attachment layout"); } } } } } return skip; } void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer, const safe_VkAttachmentReference2 &ref) { if (ref.attachment != VK_ATTACHMENT_UNUSED) { IMAGE_VIEW_STATE *image_view = nullptr; if (pFramebuffer->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) { const auto attachment_info = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pCB->activeRenderPassBeginInfo.pNext); if (attachment_info) image_view = GetImageViewState(attachment_info->pAttachments[ref.attachment]); } else { image_view = GetAttachmentImageViewState(pCB, pFramebuffer, ref.attachment); } if (image_view) { VkImageLayout stencil_layout = kInvalidLayout; const auto *attachment_reference_stencil_layout = lvl_find_in_chain<VkAttachmentReferenceStencilLayoutKHR>(ref.pNext); if (attachment_reference_stencil_layout) { stencil_layout = attachment_reference_stencil_layout->stencilLayout; } SetImageViewLayout(pCB, *image_view, ref.layout, stencil_layout); } } } void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state, const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) { assert(render_pass_state); if (framebuffer_state) { auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index]; for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]); } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]); } if (subpass.pDepthStencilAttachment) { TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment); } } } // Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes: // 1. Transition into initialLayout state // 2. 
Transition from initialLayout to layout used in subpass 0 void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state, FRAMEBUFFER_STATE *framebuffer_state) { // First transition into initialLayout auto const rpci = render_pass_state->createInfo.ptr(); for (uint32_t i = 0; i < rpci->attachmentCount; ++i) { IMAGE_VIEW_STATE *view_state = nullptr; if (framebuffer_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) { const auto attachment_info = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(cb_state->activeRenderPassBeginInfo.pNext); if (attachment_info) view_state = GetImageViewState(attachment_info->pAttachments[i]); } else { view_state = GetAttachmentImageViewState(cb_state, framebuffer_state, i); } if (view_state) { VkImageLayout stencil_layout = kInvalidLayout; const auto *attachment_description_stencil_layout = lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(rpci->pAttachments[i].pNext); if (attachment_description_stencil_layout) { stencil_layout = attachment_description_stencil_layout->stencilInitialLayout; } SetImageViewLayout(cb_state, *view_state, rpci->pAttachments[i].initialLayout, stencil_layout); } } // Now transition for first subpass (index 0) TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state); } bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) { if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) { if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false; } if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) { if (!FormatHasDepth(format)) return false; } if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) { if (!FormatHasStencil(format)) return false; } if (0 != (aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) { if (FormatPlaneCount(format) == 1) return false; } return true; } // Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags. bool CoreChecks::ValidateBarrierLayoutToImageUsage(const VkImageMemoryBarrier &img_barrier, bool new_not_old, VkImageUsageFlags usage_flags, const char *func_name, const char *barrier_pname) const { bool skip = false; const VkImageLayout layout = (new_not_old) ? 
img_barrier.newLayout : img_barrier.oldLayout; const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error" switch (layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208"; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209"; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210"; } break; case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211"; } break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212"; } break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213"; } break; case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV: if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088"; } break; case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01658"; } break; case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01659"; } break; default: // Other VkImageLayout values do not have VUs defined in this context. break; } if (msg_code != kVUIDUndefined) { skip |= LogError(img_barrier.image, msg_code, "%s: Image barrier %s %s Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".", func_name, barrier_pname, ((new_not_old) ? "new" : "old"), string_VkImageLayout(layout), report_data->FormatHandle(img_barrier.image).c_str(), usage_flags); } return skip; } // Verify image barriers are compatible with the images they reference. bool CoreChecks::ValidateBarriersToImages(const CMD_BUFFER_STATE *cb_state, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) const { bool skip = false; // Scoreboard for checking for duplicate and inconsistent barriers to images struct ImageBarrierScoreboardEntry { uint32_t index; // This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is // required, copies should be made and smart or unique pointers used in some other stucture (or this one refactored) const VkImageMemoryBarrier *barrier; }; using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>; using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>; // Scoreboard for duplicate layout transition barriers within the list // Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. 
within the scope of the API call) ImageBarrierScoreboardImageMap layout_transitions; for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) { const auto &img_barrier = pImageMemoryBarriers[i]; const std::string barrier_pname = "pImageMemoryBarrier[" + std::to_string(i) + "]"; // Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource // TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts // at a per sub-resource level if (img_barrier.oldLayout != img_barrier.newLayout) { const ImageBarrierScoreboardEntry new_entry{i, &img_barrier}; const auto image_it = layout_transitions.find(img_barrier.image); if (image_it != layout_transitions.end()) { auto &subres_map = image_it->second; auto subres_it = subres_map.find(img_barrier.subresourceRange); if (subres_it != subres_map.end()) { auto &entry = subres_it->second; if ((entry.barrier->newLayout != img_barrier.oldLayout) && (img_barrier.oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) { const VkImageSubresourceRange &range = img_barrier.subresourceRange; skip = LogError( cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-oldLayout-01197", "%s: %s conflicts with earlier entry pImageMemoryBarrier[%u]. %s" " subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; " "conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.", func_name, barrier_pname.c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(), range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount, string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout)); } entry = new_entry; } else { subres_map[img_barrier.subresourceRange] = new_entry; } } else { layout_transitions[img_barrier.image][img_barrier.subresourceRange] = new_entry; } } auto image_state = GetImageState(img_barrier.image); if (image_state) { VkImageUsageFlags usage_flags = image_state->createInfo.usage; skip |= ValidateBarrierLayoutToImageUsage(img_barrier, false, usage_flags, func_name, barrier_pname.c_str()); skip |= ValidateBarrierLayoutToImageUsage(img_barrier, true, usage_flags, func_name, barrier_pname.c_str()); // Make sure layout is able to be transitioned, currently only presented shared presentable images are locked if (image_state->layout_locked) { // TODO: Add unique id for error when available skip |= LogError( img_barrier.image, 0, "%s: Attempting to transition shared presentable %s" " from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.", func_name, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout)); } const VkImageCreateInfo &image_create_info = image_state->createInfo; const VkFormat image_format = image_create_info.format; const VkImageAspectFlags aspect_mask = img_barrier.subresourceRange.aspectMask; // For a Depth/Stencil image both aspects MUST be set if (FormatIsDepthAndStencil(image_format)) { if (enabled_features.core12.separateDepthStencilLayouts) { if (!(aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) { skip |= LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-03319", "%s: Image barrier %s references %s of format %s that must have either the depth or stencil " "aspects set, but its aspectMask is 0x%" PRIx32 ".", func_name, 
barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(), string_VkFormat(image_format), aspect_mask); } } else { auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if ((aspect_mask & ds_mask) != (ds_mask)) { const char *vuid = device_extensions.vk_khr_separate_depth_stencil_layouts ? "VUID-VkImageMemoryBarrier-image-03320" : "VUID-VkImageMemoryBarrier-image-01207"; skip |= LogError(img_barrier.image, vuid, "%s: Image barrier %s references %s of format %s that must have the depth and stencil " "aspects set, but its aspectMask is 0x%" PRIx32 ".", func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(), string_VkFormat(image_format), aspect_mask); } } } const auto *subresource_map = GetImageSubresourceLayoutMap(cb_state, img_barrier.image); if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) { // TODO: Set memory invalid which is in mem_tracker currently // Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with // subresource. } else if (subresource_map && !QueueFamilyIsExternal(img_barrier.srcQueueFamilyIndex)) { bool subres_skip = false; LayoutUseCheckAndMessage layout_check(subresource_map); VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, img_barrier.subresourceRange); for (auto pos = subresource_map->Find(normalized_isr); (pos != subresource_map->End()) && !subres_skip; ++pos) { const auto &value = *pos; if (!layout_check.Check(value.subresource, img_barrier.oldLayout, value.current_layout, value.initial_layout)) { subres_skip = LogError( cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-oldLayout-01197", "%s: For %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when the " "%s layout is %s.", func_name, report_data->FormatHandle(img_barrier.image).c_str(), value.subresource.aspectMask, value.subresource.mipLevel, value.subresource.arrayLayer, string_VkImageLayout(img_barrier.oldLayout), layout_check.message, string_VkImageLayout(layout_check.layout)); } } skip |= subres_skip; } // checks color format and (single-plane or non-disjoint) // if ycbcr extension is not supported then single-plane and non-disjoint are always both true if ((FormatIsColor(image_format) == true) && ((FormatIsMultiplane(image_format) == false) || (image_state->disjoint == false))) { if (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? 
"VUID-VkImageMemoryBarrier-image-01671" : "VUID-VkImageMemoryBarrier-image-02902"; skip |= LogError(img_barrier.image, vuid, "%s: Image barrier %s references %s of format %s that must be only VK_IMAGE_ASPECT_COLOR_BIT, " "but its aspectMask is 0x%" PRIx32 ".", func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(), string_VkFormat(image_format), aspect_mask); } } VkImageAspectFlags valid_disjoint_mask = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_COLOR_BIT; if ((FormatIsMultiplane(image_format) == true) && (image_state->disjoint == true) && ((aspect_mask & valid_disjoint_mask) == 0)) { skip |= LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-01672", "%s: Image barrier %s references %s of format %s has aspectMask (0x%" PRIx32 ") but needs to include either an VK_IMAGE_ASPECT_PLANE_*_BIT or VK_IMAGE_ASPECT_COLOR_BIT.", func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(), string_VkFormat(image_format), aspect_mask); } if ((FormatPlaneCount(image_format) == 2) && ((aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT) != 0)) { skip |= LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-01673", "%s: Image barrier %s references %s of format %s has only two planes but included " "VK_IMAGE_ASPECT_PLANE_2_BIT in its aspectMask (0x%" PRIx32 ").", func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(), string_VkFormat(image_format), aspect_mask); } } } return skip; } bool CoreChecks::IsReleaseOp(CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier &barrier) const { if (!IsTransferOp(&barrier)) return false; auto pool = cb_state->command_pool.get(); return pool && TempIsReleaseOp<VkImageMemoryBarrier, true>(pool, &barrier); } template <typename Barrier> bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const Barrier *barriers) const { using BarrierRecord = QFOTransferBarrier<Barrier>; bool skip = false; auto pool = cb_state->command_pool.get(); auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag()); const char *barrier_name = BarrierRecord::BarrierName(); const char *handle_name = BarrierRecord::HandleName(); const char *transfer_type = nullptr; for (uint32_t b = 0; b < barrier_count; b++) { if (!IsTransferOp(&barriers[b])) continue; const BarrierRecord *barrier_record = nullptr; if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) && !QueueFamilyIsExternal(barriers[b].dstQueueFamilyIndex)) { const auto found = barrier_sets.release.find(barriers[b]); if (found != barrier_sets.release.cend()) { barrier_record = &(*found); transfer_type = "releasing"; } } else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) && !QueueFamilyIsExternal(barriers[b].srcQueueFamilyIndex)) { const auto found = barrier_sets.acquire.find(barriers[b]); if (found != barrier_sets.acquire.cend()) { barrier_record = &(*found); transfer_type = "acquiring"; } } if (barrier_record != nullptr) { skip |= LogWarning(cb_state->commandBuffer, BarrierRecord::ErrMsgDuplicateQFOInCB(), "%s: %s at index %" PRIu32 " %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.", func_name, barrier_name, b, transfer_type, handle_name, report_data->FormatHandle(barrier_record->handle).c_str(), 
barrier_record->srcQueueFamilyIndex, barrier_record->dstQueueFamilyIndex); } } return skip; } VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) { return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage); } const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) { return device_state.GetImageState(barrier.image); } VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) { return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer); } const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) { return device_state.GetBufferState(barrier.buffer); } VkBuffer BarrierHandle(const VkBufferMemoryBarrier &barrier) { return barrier.buffer; } template <typename Barrier> void CoreChecks::RecordBarrierArrayValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const Barrier *barriers) { auto pool = cb_state->command_pool.get(); auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag()); for (uint32_t b = 0; b < barrier_count; b++) { auto &barrier = barriers[b]; if (IsTransferOp(&barrier)) { if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barrier) && !QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) { barrier_sets.release.emplace(barrier); } else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barrier) && !QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) { barrier_sets.acquire.emplace(barrier); } } const uint32_t src_queue_family = barrier.srcQueueFamilyIndex; const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex; if (!QueueFamilyIsIgnored(src_queue_family) && !QueueFamilyIsIgnored(dst_queue_family)) { // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria // TODO create a better named list, or rename the submit time lists to something that matches the broader usage... auto handle_state = BarrierHandleState(*this, barrier); bool mode_concurrent = handle_state ? 
handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false; if (!mode_concurrent) { const auto typed_handle = BarrierTypedHandle(barrier); cb_state->queue_submit_functions.emplace_back( [func_name, cb_state, typed_handle, src_queue_family, dst_queue_family]( const ValidationStateTracker *device_data, const QUEUE_STATE *queue_state) { return ValidateConcurrentBarrierAtSubmit(device_data, queue_state, func_name, cb_state, typed_handle, src_queue_family, dst_queue_family); }); } } } } bool CoreChecks::ValidateBarriersQFOTransferUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) const { bool skip = false; skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers); skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers); return skip; } void CoreChecks::RecordBarrierValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) { RecordBarrierArrayValidationInfo(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers); RecordBarrierArrayValidationInfo(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers); } template <typename BarrierRecord, typename Scoreboard> bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state, const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) const { // Record to the scoreboard or report that we have a duplication bool skip = false; auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state)); if (!inserted.second && inserted.first->second != cb_state) { // This is a duplication (but don't report duplicates from the same CB, as we do that at record time LogObjectList objlist(cb_state->commandBuffer); objlist.add(barrier.handle); objlist.add(inserted.first->second->commandBuffer); skip = LogWarning(objlist, BarrierRecord::ErrMsgDuplicateQFOInSubmit(), "%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier submitted in this batch from %s.", "vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(), report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex, report_data->FormatHandle(inserted.first->second->commandBuffer).c_str()); } return skip; } template <typename Barrier> bool CoreChecks::ValidateQueuedQFOTransferBarriers(const CMD_BUFFER_STATE *cb_state, QFOTransferCBScoreboards<Barrier> *scoreboards) const { using BarrierRecord = QFOTransferBarrier<Barrier>; using TypeTag = typename BarrierRecord::Tag; bool skip = false; const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag()); const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag()); const char *barrier_name = BarrierRecord::BarrierName(); const char *handle_name = BarrierRecord::HandleName(); // No release should have an extant duplicate (WARNING) for (const auto &release : cb_barriers.release) { // Check the global pending release barriers const auto set_it = global_release_barriers.find(release.handle); if (set_it != global_release_barriers.cend()) 
{ const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; const auto found = set_for_handle.find(release); if (found != set_for_handle.cend()) { skip |= LogWarning(cb_state->commandBuffer, BarrierRecord::ErrMsgDuplicateQFOSubmitted(), "%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier queued for execution, without intervening acquire operation.", "vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(), found->srcQueueFamilyIndex, found->dstQueueFamilyIndex); } } skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release); } // Each acquire must have a matching release (ERROR) for (const auto &acquire : cb_barriers.acquire) { const auto set_it = global_release_barriers.find(acquire.handle); bool matching_release_found = false; if (set_it != global_release_barriers.cend()) { const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend(); } if (!matching_release_found) { skip |= LogError(cb_state->commandBuffer, BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(), "%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.", "vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(), acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex); } skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire); } return skip; } bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state, QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const { bool skip = false; skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state, qfo_image_scoreboards); skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state, qfo_buffer_scoreboards); return skip; } template <typename Barrier> void CoreChecks::RecordQueuedQFOTransferBarriers(CMD_BUFFER_STATE *cb_state) { using BarrierRecord = QFOTransferBarrier<Barrier>; using TypeTag = typename BarrierRecord::Tag; const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag()); GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag()); // Add release barriers from this submit to the global map for (const auto &release : cb_barriers.release) { // the global barrier list is mapped by resource handle to allow cleanup on resource destruction // NOTE: We're using [] because creation of a Set is a needed side effect for new handles global_release_barriers[release.handle].insert(release); } // Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed for (const auto &acquire : cb_barriers.acquire) { // NOTE: We're not using [] because we don't want to create entries for missing releases auto set_it = global_release_barriers.find(acquire.handle); if (set_it != global_release_barriers.end()) { QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; set_for_handle.erase(acquire); if (set_for_handle.size() == 0) { // Clean up empty sets global_release_barriers.erase(set_it); } } } } void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) { 
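    // Illustrative lifecycle (comments only, hypothetical handles) of the submit-time QFO
    // bookkeeping performed by the calls below, together with ValidateQueuedQFOTransferBarriers:
    //   submit CB_A recording a release of `img` (qfi 0 -> 1)   => global release map gains that release
    //   submit CB_B recording the matching acquire (qfi 0 -> 1) => the release is erased (consumed)
    //   submit CB_B again without a fresh release               => ErrMsgMissingQFOReleaseInSubmit fires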
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state); RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state); } // Avoid making the template globally visible by exporting the one instance of it we need. void CoreChecks::EraseQFOImageRelaseBarriers(const VkImage &image) { EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image); } void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) { for (uint32_t i = 0; i < memBarrierCount; ++i) { const auto &mem_barrier = pImgMemBarriers[i]; // For ownership transfers, the barrier is specified twice; as a release // operation on the yielding queue family, and as an acquire operation // on the acquiring queue family. This barrier may also include a layout // transition, which occurs 'between' the two operations. For validation // purposes it doesn't seem important which side performs the layout // transition, but it must not be performed twice. We'll arbitrarily // choose to perform it as part of the acquire operation. // // However, we still need to record initial layout for the "initial layout" validation const bool is_release_op = IsReleaseOp(cb_state, mem_barrier); auto *image_state = GetImageState(mem_barrier.image); if (!image_state) continue; RecordTransitionImageLayout(cb_state, image_state, mem_barrier, is_release_op); } } void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const VkImageMemoryBarrier &mem_barrier, bool is_release_op) { VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, mem_barrier.subresourceRange); const auto &image_create_info = image_state->createInfo; // Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and // <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts // for all (potential) layer sub_resources. if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) { normalized_isr.baseArrayLayer = 0; normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource } VkImageLayout initial_layout = mem_barrier.oldLayout; // Layout transitions in external instance are not tracked, so don't validate initial layout. 
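    // Illustrative sketch (comment only, hypothetical values): an acquire from an external
    // queue family that lands in this path. The matching release happened outside this
    // instance, so the tracked layout history cannot vouch for oldLayout:
    //
    //   VkImageMemoryBarrier acquire = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
    //   acquire.srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL;
    //   acquire.dstQueueFamilyIndex = graphics_qfi;  // hypothetical local family index
    //   acquire.oldLayout = VK_IMAGE_LAYOUT_GENERAL; // whatever the external side left behind
    //   acquire.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;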
if (QueueFamilyIsExternal(mem_barrier.srcQueueFamilyIndex)) { initial_layout = VK_IMAGE_LAYOUT_UNDEFINED; } if (is_release_op) { SetImageInitialLayout(cb_state, *image_state, normalized_isr, mem_barrier.oldLayout); } else { SetImageLayout(cb_state, *image_state, normalized_isr, mem_barrier.newLayout, initial_layout); } } bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state, const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask, VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const { if (disabled[image_layout_validation]) return false; assert(cb_node); assert(image_state); const auto image = image_state->image; bool skip = false; const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image); if (subresource_map) { bool subres_skip = false; LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask); for (auto pos = subresource_map->Find(range); (pos != subresource_map->End()) && !subres_skip; ++pos) { if (!layout_check.Check(pos->subresource, explicit_layout, pos->current_layout, pos->initial_layout)) { *error = true; subres_skip |= LogError(cb_node->commandBuffer, layout_mismatch_msg_code, "%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the " "%s layout %s.", caller, report_data->FormatHandle(image).c_str(), pos->subresource.arrayLayer, pos->subresource.mipLevel, string_VkImageLayout(explicit_layout), layout_check.message, string_VkImageLayout(layout_check.layout)); } } skip |= subres_skip; } // If optimal_layout is not UNDEFINED, check that layout matches optimal for this case if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) { if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) { if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) { // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning. 
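                // Example trigger (comment only, hypothetical handles): transferring into a
                // TILING_OPTIMAL image while it sits in GENERAL,
                //     vkCmdCopyBufferToImage(cb, staging_buf, img, VK_IMAGE_LAYOUT_GENERAL, 1, &region);
                // is legal but lands here; TRANSFER_DST_OPTIMAL would be the optimal_layout.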
skip |= LogPerformanceWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidImageLayout,
                                              "%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
                                              report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
            }
        } else if (device_extensions.vk_khr_shared_presentable_image) {
            if (image_state->shared_presentable) {
                if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
                    skip |= LogError(device, layout_invalid_msg_code,
                                     "%s: Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                     caller, string_VkImageLayout(explicit_layout));
                }
            }
        } else {
            *error = true;
            skip |= LogError(cb_node->commandBuffer, layout_invalid_msg_code,
                             "%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
                             report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
                             string_VkImageLayout(optimal_layout));
        }
    }
    return skip;
}

bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
                                   const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
                                   VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
                                   const char *layout_mismatch_msg_code, bool *error) const {
    return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
                             layout_invalid_msg_code, layout_mismatch_msg_code, error);
}

void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                               FRAMEBUFFER_STATE *framebuffer_state) {
    auto renderPass = GetRenderPassState(pRenderPassBegin->renderPass);
    if (!renderPass) return;

    const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr();
    if (framebuffer_state) {
        IMAGE_VIEW_STATE *view_state = nullptr;
        for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
            if (framebuffer_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) {
                const auto attachment_info = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext);
                if (attachment_info) view_state = GetImageViewState(attachment_info->pAttachments[i]);
            } else {
                view_state = GetAttachmentImageViewState(pCB, framebuffer_state, i);
            }
            if (view_state) {
                VkImageLayout stencil_layout = kInvalidLayout;
                const auto *attachment_description_stencil_layout =
                    lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(pRenderPassInfo->pAttachments[i].pNext);
                if (attachment_description_stencil_layout) {
                    stencil_layout = attachment_description_stencil_layout->stencilFinalLayout;
                }
                SetImageViewLayout(pCB, *view_state, pRenderPassInfo->pAttachments[i].finalLayout, stencil_layout);
            }
        }
    }
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
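// Illustrative sketch (comments only, hypothetical names): the AHB-backed image creation
// that the checks below target. `ahb_format_props.externalFormat` would come from a prior
// vkGetAndroidHardwareBufferPropertiesANDROID() query:
//
//   VkExternalFormatANDROID ext_fmt = {VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID};
//   ext_fmt.externalFormat = ahb_format_props.externalFormat;
//   VkImageCreateInfo ci = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, &ext_fmt};
//   ci.format = VK_FORMAT_UNDEFINED;         // required when externalFormat is non-zero (01974)
//   ci.tiling = VK_IMAGE_TILING_OPTIMAL;     // required (02398)
//   ci.usage  = VK_IMAGE_USAGE_SAMPLED_BIT;  // only SAMPLED is allowed (02397)
//   ci.flags  = 0;                           // MUTABLE_FORMAT_BIT is disallowed (02396)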
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
    bool skip = false;

    const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
    if (ext_fmt_android) {
        if (0 != ext_fmt_android->externalFormat) {
            if (VK_FORMAT_UNDEFINED != create_info->format) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-01974",
                                 "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
                                 "externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
            }

            if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02396",
                                 "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
                                 "non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
            }

            if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02397",
                                 "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
                                 "non-zero externalFormat, but usage includes bits (0x%" PRIx32
                                 ") other than VK_IMAGE_USAGE_SAMPLED_BIT.",
                                 create_info->usage);
            }

            if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02398",
                                 "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
                                 "non-zero externalFormat, but tiling is not VK_IMAGE_TILING_OPTIMAL.");
            }
        }

        if ((0 != ext_fmt_android->externalFormat) &&
            (ahb_ext_formats_map.find(ext_fmt_android->externalFormat) == ahb_ext_formats_map.end())) {
            skip |= LogError(device, "VUID-VkExternalFormatANDROID-externalFormat-01894",
                             "vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat (%" PRIu64
                             ") which has "
                             "not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().",
                             ext_fmt_android->externalFormat);
        }
    }

    if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
        if (VK_FORMAT_UNDEFINED == create_info->format) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-01975",
                             "vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
                             "chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
        }
    }

    const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
    if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
        if (create_info->imageType != VK_IMAGE_TYPE_2D) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02393",
                             "vkCreateImage(): VkImageCreateInfo struct with imageType %s has a chained "
                             "VkExternalMemoryImageCreateInfo struct with handleType "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; imageType must be "
                             "VK_IMAGE_TYPE_2D.",
                             string_VkImageType(create_info->imageType));
        }

        if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02394",
                             "vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
                             "handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
                             "specifies mipLevels = %" PRIu32 " (full chain mipLevels are %" PRIu32 ").",
                             create_info->mipLevels,
                             FullMipChainLevels(create_info->extent));
        }
    }

    return skip;
}

bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const {
    bool skip = false;
    const IMAGE_STATE *image_state = GetImageState(create_info->image);

    if (image_state && image_state->has_ahb_format) {
        if (VK_FORMAT_UNDEFINED != create_info->format) {
            skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02399",
                             "vkCreateImageView(): image was created with a chained VkExternalFormatANDROID struct, but "
                             "format member is %s and must be VK_FORMAT_UNDEFINED.",
                             string_VkFormat(create_info->format));
        }

        // Chain must include a compatible ycbcr conversion
        bool conv_found = false;
        uint64_t external_format = 0;
        const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
        if (ycbcr_conv_info != nullptr) {
            VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion;
            if (ycbcr_conversion_ahb_fmt_map.find(conv_handle) != ycbcr_conversion_ahb_fmt_map.end()) {
                conv_found = true;
                external_format = ycbcr_conversion_ahb_fmt_map.at(conv_handle);
            }
        }
        if ((!conv_found) || (external_format != image_state->ahb_format)) {
            skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02400",
                             "vkCreateImageView(): image was created with a chained VkExternalFormatANDROID struct with "
                             "an externalFormat (%" PRIu64
                             ") but needs a chained VkSamplerYcbcrConversionInfo struct with a VkSamplerYcbcrConversion created "
                             "with the same external format.",
                             image_state->ahb_format);
        }

        // Errors in create_info swizzles
        if (IsIdentitySwizzle(create_info->components) == false) {
            skip |= LogError(
                create_info->image, "VUID-VkImageViewCreateInfo-image-02401",
                "vkCreateImageView(): image was created with a chained VkExternalFormatANDROID struct, but "
                "includes one or more non-identity component swizzles, r swizzle = %s, g swizzle = %s, b swizzle = %s, a swizzle "
                "= %s.",
                string_VkComponentSwizzle(create_info->components.r), string_VkComponentSwizzle(create_info->components.g),
                string_VkComponentSwizzle(create_info->components.b), string_VkComponentSwizzle(create_info->components.a));
        }
    }

    return skip;
}

bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
    bool skip = false;

    const IMAGE_STATE *image_state = GetImageState(image);
    if (image_state != nullptr) {
        if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
            skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-01895",
                             "vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
                             "bound to memory.");
        }
    }
    return skip;
}

#else

bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
    return false;
}

bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const { return false; }

bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }

#endif  // VK_USE_PLATFORM_ANDROID_KHR

bool CoreChecks::ValidateImageFormatFeatures(const VkImageCreateInfo *pCreateInfo) const {
    bool skip = false;

    // validates based on imageCreateFormatFeatures from vkspec.html#resources-image-creation-limits
    VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
    const VkImageTiling image_tiling = pCreateInfo->tiling;
    const VkFormat image_format =
pCreateInfo->format;

    if (image_format == VK_FORMAT_UNDEFINED) {
        // VU 01975 states format can't be undefined unless an android externalFormat
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext);
        if ((image_tiling == VK_IMAGE_TILING_OPTIMAL) && (ext_fmt_android != nullptr) && (0 != ext_fmt_android->externalFormat)) {
            auto it = ahb_ext_formats_map.find(ext_fmt_android->externalFormat);
            if (it != ahb_ext_formats_map.end()) {
                tiling_features = it->second;
            }
        }
#endif
    } else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        // DRM format modifiers are opaque 64-bit values, not bit flags, so collect the candidate
        // modifier(s) and match them against the driver-reported list by equality
        std::vector<uint64_t> drm_format_modifiers;
        const VkImageDrmFormatModifierExplicitCreateInfoEXT *drm_explicit =
            lvl_find_in_chain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
        const VkImageDrmFormatModifierListCreateInfoEXT *drm_implicit =
            lvl_find_in_chain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);

        if (drm_explicit != nullptr) {
            drm_format_modifiers.push_back(drm_explicit->drmFormatModifier);
        } else {
            // VUID 02261 makes sure it's only explicit or implicit in parameter checking
            assert(drm_implicit != nullptr);
            for (uint32_t i = 0; i < drm_implicit->drmFormatModifierCount; i++) {
                drm_format_modifiers.push_back(drm_implicit->pDrmFormatModifiers[i]);
            }
        }

        VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
        VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
                                                                    nullptr};
        format_properties_2.pNext = (void *)&drm_properties_list;
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
        std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
        drm_properties.resize(drm_properties_list.drmFormatModifierCount);
        drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);

        VkFormatFeatureFlags drm_tiling_features = 0;
        for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
            for (const uint64_t modifier : drm_format_modifiers) {
                if (drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier == modifier) {
                    drm_tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
                    break;
                }
            }
        }
        // Only narrow tiling_features when the driver actually reported the requested modifier(s)
        if (drm_tiling_features != 0) {
            tiling_features = drm_tiling_features;
        }
    } else {
        VkFormatProperties format_properties = GetPDFormatProperties(image_format);
        tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ?
format_properties.linearTilingFeatures : format_properties.optimalTilingFeatures; } // Lack of disjoint format feature support while using the flag if (FormatIsMultiplane(image_format) && ((pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT) != 0) && ((tiling_features & VK_FORMAT_FEATURE_DISJOINT_BIT) == 0)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateFormatFeatures-02260", "vkCreateImage(): can't use VK_IMAGE_CREATE_DISJOINT_BIT because %s doesn't support " "VK_FORMAT_FEATURE_DISJOINT_BIT based on imageCreateFormatFeatures.", string_VkFormat(pCreateInfo->format)); } return skip; } bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) const { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateCreateImageANDROID(report_data, pCreateInfo); } else { // These checks are omitted or replaced when Android HW Buffer extension is active if (pCreateInfo->format == VK_FORMAT_UNDEFINED) { return LogError(device, "VUID-VkImageCreateInfo-format-00943", "vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED."); } } if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) { if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00949", "vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT " "flag bit is set"); } if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954", "vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal " "height (%d) and arrayLayers (%d) must be >= 6.", pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers); } } const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits; VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) { skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00964", "vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width exceeds device " "maxFramebufferWidth."); } if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) { skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00965", "vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height exceeds device " "maxFramebufferHeight"); } if (device_extensions.vk_ext_fragment_density_map || device_extensions.vk_ext_fragment_density_map_2) { uint32_t ceiling_width = (uint32_t)ceil((float)device_limits->maxFramebufferWidth / std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, 1.0f)); if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) { skip |= LogError(device, "VUID-VkImageCreateInfo-usage-02559", "vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the " "ceiling of device " "maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). 
The ceiling value: %u", pCreateInfo->extent.width, device_limits->maxFramebufferWidth, phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width); } uint32_t ceiling_height = (uint32_t)ceil((float)device_limits->maxFramebufferHeight / std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, 1.0f)); if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) { skip |= LogError(device, "VUID-VkImageCreateInfo-usage-02560", "vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the " "ceiling of device " "maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u", pCreateInfo->extent.height, device_limits->maxFramebufferHeight, phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height); } } VkImageFormatProperties format_limits = {}; VkResult result = VK_SUCCESS; if (pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) { result = DispatchGetPhysicalDeviceImageFormatProperties(physical_device, pCreateInfo->format, pCreateInfo->imageType, pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags, &format_limits); } else { auto modifier_list = lvl_find_in_chain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext); auto explicit_modifier = lvl_find_in_chain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext); if (modifier_list) { for (uint32_t i = 0; i < modifier_list->drmFormatModifierCount; i++) { auto drm_format_modifier = lvl_init_struct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>(); drm_format_modifier.drmFormatModifier = modifier_list->pDrmFormatModifiers[i]; auto image_format_info = lvl_init_struct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier); image_format_info.type = pCreateInfo->imageType; image_format_info.format = pCreateInfo->format; image_format_info.tiling = pCreateInfo->tiling; image_format_info.usage = pCreateInfo->usage; image_format_info.flags = pCreateInfo->flags; auto image_format_properties = lvl_init_struct<VkImageFormatProperties2>(); result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties); format_limits = image_format_properties.imageFormatProperties; /* The application gives a list of modifier and the driver * selects one. If one is wrong, stop there. */ if (result != VK_SUCCESS) break; } } else if (explicit_modifier) { auto drm_format_modifier = lvl_init_struct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>(); drm_format_modifier.drmFormatModifier = explicit_modifier->drmFormatModifier; auto image_format_info = lvl_init_struct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier); image_format_info.type = pCreateInfo->imageType; image_format_info.format = pCreateInfo->format; image_format_info.tiling = pCreateInfo->tiling; image_format_info.usage = pCreateInfo->usage; image_format_info.flags = pCreateInfo->flags; auto image_format_properties = lvl_init_struct<VkImageFormatProperties2>(); result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties); format_limits = image_format_properties.imageFormatProperties; } } // 1. vkGetPhysicalDeviceImageFormatProperties[2] only success code is VK_SUCCESS // 2. If call returns an error, then "imageCreateImageFormatPropertiesList" is defined to be the empty list // 3. 
All values in 02251 are undefined if "imageCreateImageFormatPropertiesList" is empty.
    if (result != VK_SUCCESS) {
        // External memory will always have a "imageCreateImageFormatPropertiesList" so skip
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (!lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext))
#endif  // VK_USE_PLATFORM_ANDROID_KHR
            skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateMaxMipLevels-02251",
                             "vkCreateImage(): Format %s is not supported for this combination of parameters and "
                             "vkGetPhysicalDeviceImageFormatProperties returned %s.",
                             string_VkFormat(pCreateInfo->format), string_VkResult(result));
    } else {
        if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
            const char *format_string = string_VkFormat(pCreateInfo->format);
            skip |= LogError(device, "VUID-VkImageCreateInfo-mipLevels-02255",
                             "vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
                             pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
        }

        uint64_t texel_count = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
                               (uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
                               (uint64_t)pCreateInfo->samples;
        uint64_t total_size = (uint64_t)std::ceil(FormatTexelSize(pCreateInfo->format) * texel_count);

        // Round up to imageGranularity boundary
        VkDeviceSize imageGranularity = phys_dev_props.limits.bufferImageGranularity;
        uint64_t ig_mask = imageGranularity - 1;
        total_size = (total_size + ig_mask) & ~ig_mask;

        if (total_size > format_limits.maxResourceSize) {
            skip |= LogWarning(device, kVUID_Core_Image_InvalidFormatLimitsViolation,
                               "vkCreateImage(): resource size (0x%" PRIxLEAST64
                               ") exceeds allowable maximum resource size (0x%" PRIxLEAST64 ").",
                               total_size, format_limits.maxResourceSize);
        }

        if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-arrayLayers-02256",
                             "vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
                             pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
        }

        if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02258",
                             "vkCreateImage(): samples %s is not supported by this format (supported sample count bits: 0x%.8X).",
                             string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
        }

        if (pCreateInfo->extent.width > format_limits.maxExtent.width) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02252",
                             "vkCreateImage(): extent.width %u exceeds allowable maximum image extent width %u.",
                             pCreateInfo->extent.width, format_limits.maxExtent.width);
        }

        if (pCreateInfo->extent.height > format_limits.maxExtent.height) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02253",
                             "vkCreateImage(): extent.height %u exceeds allowable maximum image extent height %u.",
                             pCreateInfo->extent.height, format_limits.maxExtent.height);
        }

        if (pCreateInfo->extent.depth > format_limits.maxExtent.depth) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02254",
                             "vkCreateImage(): extent.depth %u exceeds allowable maximum image extent depth %u.",
                             pCreateInfo->extent.depth, format_limits.maxExtent.depth);
        }
    }

    // Tests for "Formats requiring sampler YCBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
    if (FormatRequiresYcbcrConversion(pCreateInfo->format)) {
        if (!enabled_features.ycbcr_image_array_features.ycbcrImageArrays && pCreateInfo->arrayLayers != 1) {
            const char *error_vuid = (device_extensions.vk_ext_ycbcr_image_arrays) ?
"VUID-VkImageCreateInfo-format-02653" : "VUID-VkImageCreateInfo-format-02564"; skip |= LogError(device, error_vuid, "vkCreateImage(): arrayLayers = %d, but when the ycbcrImagesArrays feature is not enabled and using a " "YCbCr Conversion format, arrayLayers must be 1", pCreateInfo->arrayLayers); } if (pCreateInfo->mipLevels != 1) { skip |= LogError(device, "VUID-VkImageCreateInfo-format-02561", "vkCreateImage(): mipLevels = %d, but when using a YCbCr Conversion format, mipLevels must be 1", pCreateInfo->arrayLayers); } if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) { skip |= LogError( device, "VUID-VkImageCreateInfo-format-02562", "vkCreateImage(): samples = %s, but when using a YCbCr Conversion format, samples must be VK_SAMPLE_COUNT_1_BIT", string_VkSampleCountFlagBits(pCreateInfo->samples)); } if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) { skip |= LogError( device, "VUID-VkImageCreateInfo-format-02563", "vkCreateImage(): imageType = %s, but when using a YCbCr Conversion format, imageType must be VK_IMAGE_TYPE_2D ", string_VkImageType(pCreateInfo->imageType)); } } if (device_extensions.vk_khr_maintenance2) { if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) { if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC(pCreateInfo->format) || FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01572", "vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, " "format must be block, ETC or ASTC compressed, but is %s", string_VkFormat(pCreateInfo->format)); } if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01573", "vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, " "flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT."); } } } if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) { skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage", "pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420"); } if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) && (pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) { skip |= LogError(device, "VUID-VkImageCreateInfo-format-01577", "vkCreateImage(): format is %s and flags are %s. 
The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.", string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str()); } const auto swapchain_create_info = lvl_find_in_chain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext); if (swapchain_create_info != nullptr) { if (swapchain_create_info->swapchain != VK_NULL_HANDLE) { const SWAPCHAIN_NODE *swapchain_state = GetSwapchainState(swapchain_create_info->swapchain); const VkSwapchainCreateFlagsKHR swapchain_flags = swapchain_state->createInfo.flags; // Validate rest of Swapchain Image create check that require swapchain state const char *vuid = "VUID-VkImageSwapchainCreateInfoKHR-swapchain-00995"; if (((swapchain_flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) != 0) && ((pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) == 0)) { skip |= LogError( device, vuid, "vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR flag so " "all swapchain images must have the VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT flag set."); } if (((swapchain_flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) != 0) && ((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) == 0)) { skip |= LogError(device, vuid, "vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR flag so all " "swapchain images must have the VK_IMAGE_CREATE_PROTECTED_BIT flag set."); } const VkImageCreateFlags mutable_flags = (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR); if (((swapchain_flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) != 0) && ((pCreateInfo->flags & mutable_flags) != mutable_flags)) { skip |= LogError(device, vuid, "vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR flag so " "all swapchain images must have the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and " "VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR flags both set."); } } } if ((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) != 0) { if (enabled_features.core11.protectedMemory == VK_FALSE) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01890", "vkCreateImage(): the protectedMemory device feature is disabled: Images cannot be created with the " "VK_IMAGE_CREATE_PROTECTED_BIT set."); } const VkImageCreateFlags invalid_flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT; if ((pCreateInfo->flags & invalid_flags) != 0) { skip |= LogError(device, "VUID-VkImageCreateInfo-None-01891", "vkCreateImage(): VK_IMAGE_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at same " "time (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)."); } } skip |= ValidateImageFormatFeatures(pCreateInfo); return skip; } void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) { if (VK_SUCCESS != result) return; StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result); auto image_state = Get<IMAGE_STATE>(*pImage); AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap); } bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) const { const IMAGE_STATE *image_state = GetImageState(image); const VulkanTypedHandle obj_struct(image, kVulkanObjectTypeImage); bool skip = false; if 
(image_state) {
        skip |= ValidateObjectNotInUse(image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    // Clean up validation specific data
    EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image);

    imageLayoutMap.erase(image);

    // Clean up generic image state
    StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}

bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range,
                                         const char *param_name) const {
    bool skip = false;
    const VkImage image = image_state->image;
    const VkFormat format = image_state->createInfo.format;

    if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
        skip |= LogError(image, "VUID-vkCmdClearColorImage-aspectMask-02498",
                         "vkCmdClearColorImage(): %s.aspectMask must only be set to VK_IMAGE_ASPECT_COLOR_BIT.", param_name);
    }

    if (FormatIsDepthOrStencil(format)) {
        skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
                         "vkCmdClearColorImage(): %s called with image %s which has a depth/stencil format (%s).", param_name,
                         report_data->FormatHandle(image).c_str(), string_VkFormat(format));
    } else if (FormatIsCompressed(format)) {
        skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
                         "vkCmdClearColorImage(): %s called with image %s which has a compressed format (%s).", param_name,
                         report_data->FormatHandle(image).c_str(), string_VkFormat(format));
    }

    if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00002",
                         "vkCmdClearColorImage(): %s called with image %s which was created without "
                         "VK_IMAGE_USAGE_TRANSFER_DST_BIT.",
                         param_name, report_data->FormatHandle(image).c_str());
    }
    return skip;
}

bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
                                        const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
                                        const char *func_name) const {
    bool skip = false;
    if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
        if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
            skip |= LogError(image_state->image, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012",
                             "%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
                             string_VkImageLayout(dest_image_layout));
        }
    } else {
        assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
        if (!device_extensions.vk_khr_shared_presentable_image) {
            if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
                skip |= LogError(image_state->image, "VUID-vkCmdClearColorImage-imageLayout-00005",
                                 "%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
                                 string_VkImageLayout(dest_image_layout));
            }
        } else {
            if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL) &&
                (dest_image_layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR)) {
                skip |= LogError(
                    image_state->image, "VUID-vkCmdClearColorImage-imageLayout-01394",
                    "%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL, SHARED_PRESENT_KHR, or GENERAL.",
                    func_name, string_VkImageLayout(dest_image_layout));
            }
        }
    }

    // Cast to const to prevent creation at validate time.
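    // Example trigger (comment only, hypothetical handles): recording
    //     vkCmdPipelineBarrier(cb, ..., &to_transfer_dst);               // tracked layout: TRANSFER_DST_OPTIMAL
    //     vkCmdClearColorImage(cb, img, VK_IMAGE_LAYOUT_GENERAL, ...);   // disagrees with tracked layout
    // makes the per-subresource walk below report 00004 (or 00011 for the depth/stencil clear).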
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state->image); if (subresource_map) { bool subres_skip = false; LayoutUseCheckAndMessage layout_check(subresource_map); VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, range); for (auto pos = subresource_map->Find(normalized_isr); (pos != subresource_map->End()) && !subres_skip; ++pos) { if (!layout_check.Check(pos->subresource, dest_image_layout, pos->current_layout, pos->initial_layout)) { const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004"; if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) { error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"; } else { assert(strcmp(func_name, "vkCmdClearColorImage()") == 0); } subres_skip |= LogError(cb_node->commandBuffer, error_code, "%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.", func_name, string_VkImageLayout(dest_image_layout), layout_check.message, string_VkImageLayout(layout_check.layout)); } } skip |= subres_skip; } return skip; } bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) const { bool skip = false; // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state const auto *cb_node = GetCBState(commandBuffer); const auto *image_state = GetImageState(image); if (cb_node && image_state) { skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdClearColorImage-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()"); if (device_extensions.vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage", "VUID-vkCmdClearColorImage-image-01993"); } skip |= InsideRenderPass(cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass"); skip |= ValidateProtectedImage(cb_node, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-commandBuffer-01805"); skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-commandBuffer-01806"); for (uint32_t i = 0; i < rangeCount; ++i) { std::string param_name = "pRanges[" + std::to_string(i) + "]"; skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str()); skip |= ValidateImageAttributes(image_state, pRanges[i], param_name.c_str()); skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()"); } // Tests for "Formats requiring sampler Y’CBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views" if (FormatRequiresYcbcrConversion(image_state->createInfo.format)) { skip |= LogError(device, "VUID-vkCmdClearColorImage-image-01545", "vkCmdClearColorImage(): format (%s) must not be one of the formats requiring sampler YCBCR " "conversion for VK_IMAGE_ASPECT_COLOR_BIT image views", string_VkFormat(image_state->createInfo.format)); } } return skip; } void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, 
imageLayout, pColor, rangeCount, pRanges); auto cb_node = GetCBState(commandBuffer); auto image_state = GetImageState(image); if (cb_node && image_state) { for (uint32_t i = 0; i < rangeCount; ++i) { SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout); } } } bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) const { bool skip = false; // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state const auto *cb_node = GetCBState(commandBuffer); const auto *image_state = GetImageState(image); if (cb_node && image_state) { const VkFormat image_format = image_state->createInfo.format; skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-image-00010"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()"); if (device_extensions.vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage", "VUID-vkCmdClearDepthStencilImage-image-01994"); } skip |= InsideRenderPass(cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass"); skip |= ValidateProtectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-commandBuffer-01807"); skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-commandBuffer-01808"); bool any_include_aspect_depth_bit = false; bool any_include_aspect_stencil_bit = false; for (uint32_t i = 0; i < rangeCount; ++i) { std::string param_name = "pRanges[" + std::to_string(i) + "]"; skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str()); skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()"); // Image aspect must be depth or stencil or both VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-aspectMask-02824", "vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask can only be VK_IMAGE_ASPECT_DEPTH_BIT " "and/or VK_IMAGE_ASPECT_STENCIL_BIT.", i); } if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) { any_include_aspect_depth_bit = true; if (FormatHasDepth(image_format) == false) { skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02826", "vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_DEPTH_BIT but %s " "doesn't have a depth component.", i, string_VkFormat(image_format)); } } if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) { any_include_aspect_stencil_bit = true; if (FormatHasStencil(image_format) == false) { skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02825", "vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_STENCIL_BIT but " "%s doesn't have a stencil component.", i, string_VkFormat(image_format)); } } } if (any_include_aspect_stencil_bit) { const auto image_stencil_struct = 
lvl_find_in_chain<VkImageStencilUsageCreateInfoEXT>(image_state->createInfo.pNext);
            if (image_stencil_struct != nullptr) {
                if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
                    skip |= LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02658",
                                     "vkCmdClearDepthStencilImage(): an element of pRanges[].aspectMask includes "
                                     "VK_IMAGE_ASPECT_STENCIL_BIT "
                                     "and image was created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be "
                                     "included in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
                }
            } else {
                if ((image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
                    skip |= LogError(
                        device, "VUID-vkCmdClearDepthStencilImage-pRanges-02659",
                        "vkCmdClearDepthStencilImage(): an element of pRanges[].aspectMask includes VK_IMAGE_ASPECT_STENCIL_BIT and "
                        "image was not created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included "
                        "in VkImageCreateInfo::usage used to create image");
                }
            }
        }
        if (any_include_aspect_depth_bit && (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
            skip |= LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02660",
                             "vkCmdClearDepthStencilImage(): an element of pRanges[].aspectMask includes VK_IMAGE_ASPECT_DEPTH_BIT, "
                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included in VkImageCreateInfo::usage used to create image");
        }
        if (image_state && !FormatIsDepthOrStencil(image_format)) {
            skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00014",
                             "vkCmdClearDepthStencilImage(): called with image %s which doesn't have a depth/stencil format (%s).",
                             report_data->FormatHandle(image).c_str(), string_VkFormat(image_format));
        }
        if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
            skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00009",
                             "vkCmdClearDepthStencilImage(): called with image %s which was not created with the "
                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT set.",
                             report_data->FormatHandle(image).c_str());
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                        const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                        const VkImageSubresourceRange *pRanges) {
    StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    auto cb_node = GetCBState(commandBuffer);
    auto image_state = GetImageState(image);
    if (cb_node && image_state) {
        for (uint32_t i = 0; i < rangeCount; ++i) {
            SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
        }
    }
}

// Returns true if the ranges [start, start + start_offset) and [end, end + end_offset) overlap
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
    bool result = false;
    uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
    uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);

    if (intersection_max > intersection_min) {
        result = true;
    }
    return result;
}

// Returns true if source area of first copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
    bool result = false;

    // Separate planes within a multiplane image cannot intersect
    if (is_multiplane && 
(rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) { return result; } if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) && (RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer, rgn1->dstSubresource.layerCount))) { result = true; switch (type) { case VK_IMAGE_TYPE_3D: result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth); // fall through case VK_IMAGE_TYPE_2D: result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height); // fall through case VK_IMAGE_TYPE_1D: result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width); break; default: // Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation assert(false); } } return result; } // Returns non-zero if offset and extent exceed image extents static const uint32_t x_bit = 1; static const uint32_t y_bit = 2; static const uint32_t z_bit = 4; static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) { uint32_t result = 0; // Extents/depths cannot be negative but checks left in for clarity if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) || ((offset->z + static_cast<int32_t>(extent->depth)) < 0)) { result |= z_bit; } if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) || ((offset->y + static_cast<int32_t>(extent->height)) < 0)) { result |= y_bit; } if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) || ((offset->x + static_cast<int32_t>(extent->width)) < 0)) { result |= x_bit; } return result; } // Test if two VkExtent3D structs are equivalent static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) { bool result = true; if ((extent->width != other_extent->width) || (extent->height != other_extent->height) || (extent->depth != other_extent->depth)) { result = false; } return result; } // Test if the extent argument has all dimensions set to 0. static inline bool IsExtentAllZeroes(const VkExtent3D *extent) { return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0)); } // Returns the image transfer granularity for a specific image scaled by compressed block size if necessary. VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const { // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device. 
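    // Worked example (a sketch, not taken from the spec text): assuming a queue family minImageTransferGranularity of
    // (4, 4, 1) and a BC1-compressed image whose texel block extent is 4x4x1, the granularity returned below would be
    // (16, 16, 1) -- width and height are scaled by the block dimensions, depth is left as-is.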
VkExtent3D granularity = {0, 0, 0}; auto pPool = cb_node->command_pool.get(); if (pPool) { granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity; if (FormatIsCompressed(img->createInfo.format) || FormatIsSinglePlane_422(img->createInfo.format)) { auto block_size = FormatTexelBlockExtent(img->createInfo.format); granularity.width *= block_size.width; granularity.height *= block_size.height; } } return granularity; } // Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) { bool valid = true; if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) || (SafeModulo(extent->height, granularity->height) != 0)) { valid = false; } return valid; } // Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member, const char *vuid) const { bool skip = false; VkExtent3D offset_extent = {}; offset_extent.width = static_cast<uint32_t>(abs(offset->x)); offset_extent.height = static_cast<uint32_t>(abs(offset->y)); offset_extent.depth = static_cast<uint32_t>(abs(offset->z)); if (IsExtentAllZeroes(granularity)) { // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0) if (IsExtentAllZeroes(&offset_extent) == false) { skip |= LogError(cb_node->commandBuffer, vuid, "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family " "image transfer granularity is (w=0, h=0, d=0).", function, i, member, offset->x, offset->y, offset->z); } } else { // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even // integer multiples of the image transfer granularity. if (IsExtentAligned(&offset_extent, granularity) == false) { skip |= LogError(cb_node->commandBuffer, vuid, "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command " "buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).", function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height, granularity->depth); } } return skip; } // Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type, const uint32_t i, const char *function, const char *member, const char *vuid) const { bool skip = false; if (IsExtentAllZeroes(granularity)) { // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image // subresource extent. 
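        // Note: per the Vulkan spec, a granularity of (0, 0, 0) means the queue family only supports whole-mip-level
        // transfers (offset (0, 0, 0) and extent equal to the subresource extent), while queue families that support
        // graphics or compute must report (1, 1, 1), which permits transfers at any offset/extent.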
if (IsExtentEqual(extent, subresource_extent) == false) { skip |= LogError(cb_node->commandBuffer, vuid, "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) " "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).", function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width, subresource_extent->height, subresource_extent->depth); } } else { // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image // subresource extent dimensions. VkExtent3D offset_extent_sum = {}; offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width; offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height; offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth; bool x_ok = true; bool y_ok = true; bool z_ok = true; switch (image_type) { case VK_IMAGE_TYPE_3D: z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) || (subresource_extent->depth == offset_extent_sum.depth)); // fall through case VK_IMAGE_TYPE_2D: y_ok = ((0 == SafeModulo(extent->height, granularity->height)) || (subresource_extent->height == offset_extent_sum.height)); // fall through case VK_IMAGE_TYPE_1D: x_ok = ((0 == SafeModulo(extent->width, granularity->width)) || (subresource_extent->width == offset_extent_sum.width)); break; default: // Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation assert(false); } if (!(x_ok && y_ok && z_ok)) { skip |= LogError(cb_node->commandBuffer, vuid, "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command " "buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + " "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).", function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height, granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth, subresource_extent->width, subresource_extent->height, subresource_extent->depth); } } return skip; } bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level, const uint32_t i, const char *function, const char *member, const char *vuid) const { bool skip = false; if (mip_level >= img->createInfo.mipLevels) { skip |= LogError(cb_node->commandBuffer, vuid, "In %s, pRegions[%u].%s.mipLevel is %u, but provided %s has %u mip levels.", function, i, member, mip_level, report_data->FormatHandle(img->image).c_str(), img->createInfo.mipLevels); } return skip; } bool CoreChecks::ValidateImageArrayLayerRange(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer, const uint32_t layer_count, const uint32_t i, const char *function, const char *member, const char *vuid) const { bool skip = false; if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers || (base_layer + layer_count) > img->createInfo.arrayLayers) { skip |= LogError(cb_node->commandBuffer, vuid, "In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is " "%u, but provided %s has %u array layers.", function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image).c_str(), img->createInfo.arrayLayers); } 
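    // Note: layer_count is compared against arrayLayers on its own (in addition to base_layer + layer_count) so the
    // range check stays meaningful even when base_layer + layer_count would wrap around uint32_t.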
return skip; } // Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const VkBufferImageCopy *region, const uint32_t i, const char *function, const char *vuid) const { bool skip = false; VkExtent3D granularity = GetScaledItg(cb_node, img); skip |= CheckItgOffset(cb_node, &region->imageOffset, &granularity, i, function, "imageOffset", vuid); VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource); skip |= CheckItgExtent(cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, img->createInfo.imageType, i, function, "imageExtent", vuid); return skip; } // Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *src_img, const IMAGE_STATE *dst_img, const VkImageCopy *region, const uint32_t i, const char *function) const { bool skip = false; // Source image checks VkExtent3D granularity = GetScaledItg(cb_node, src_img); skip |= CheckItgOffset(cb_node, &region->srcOffset, &granularity, i, function, "srcOffset", "VUID-vkCmdCopyImage-srcOffset-01783"); VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, &region->srcSubresource); const VkExtent3D extent = region->extent; skip |= CheckItgExtent(cb_node, &extent, &region->srcOffset, &granularity, &subresource_extent, src_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-srcOffset-01783"); // Destination image checks granularity = GetScaledItg(cb_node, dst_img); skip |= CheckItgOffset(cb_node, &region->dstOffset, &granularity, i, function, "dstOffset", "VUID-vkCmdCopyImage-dstOffset-01784"); // Adjust dest extent, if necessary const VkExtent3D dest_effective_extent = GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent); subresource_extent = GetImageSubresourceExtent(dst_img, &region->dstSubresource); skip |= CheckItgExtent(cb_node, &dest_effective_extent, &region->dstOffset, &granularity, &subresource_extent, dst_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-dstOffset-01784"); return skip; } // Validate contents of a VkImageCopy struct bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const VkImageCopy *ic_regions, const IMAGE_STATE *src_state, const IMAGE_STATE *dst_state) const { bool skip = false; for (uint32_t i = 0; i < regionCount; i++) { const VkImageCopy region = ic_regions[i]; // For comp<->uncomp copies, the copy extent for the dest image must be adjusted const VkExtent3D src_copy_extent = region.extent; const VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent); bool slice_override = false; uint32_t depth_slices = 0; // Special case for copying between a 1D/2D array and a 3D image // TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up. 
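        // Note: with VK_KHR_maintenance1, copies between a 2D array image and a 3D image are legal. When exactly one
        // side of the copy is 3D, the number of depth slices actually copied is taken from the 2D side's layerCount,
        // so the extent checks below substitute depth_slices for extent.depth (slice_override).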
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) { depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) { depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } // Do all checks on source image if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) { if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) { skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-00146", "vkCmdCopyImage(): pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must " "be 0 and 1, respectively.", i, region.srcOffset.y, src_copy_extent.height); } } if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) { skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-01785", "vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images " "these must be 0 and 1, respectively.", i, region.srcOffset.z, src_copy_extent.depth); } if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) { skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-01787", "vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.", i, region.srcOffset.z); } // Source checks that apply only to compressed images (or to _422 images if ycbcr enabled) bool ext_ycbcr = IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion); if (FormatIsCompressed(src_state->createInfo.format) || (ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) { const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format); // image offsets must be multiples of block dimensions if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) || (SafeModulo(region.srcOffset.y, block_size.height) != 0) || (SafeModulo(region.srcOffset.z, block_size.depth) != 0)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01727" : "VUID-vkCmdCopyImage-srcImage-01727"; skip |= LogError(src_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's " "texel width & height (%d, %d).", i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height); } const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource)); if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) && (src_copy_extent.width + region.srcOffset.x != mip_extent.width)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01728" : "VUID-vkCmdCopyImage-srcImage-01728"; skip |= LogError(src_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block " "width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).", i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width); } // Extent height must be a multiple of block height, or extent+offset height must equal subresource height if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) && (src_copy_extent.height + region.srcOffset.y != mip_extent.height)) { const char *vuid = ext_ycbcr ? 
"VUID-vkCmdCopyImage-srcImage-01729" : "VUID-vkCmdCopyImage-srcImage-01729"; skip |= LogError(src_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block " "height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).", i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height); } // Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth); if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01730" : "VUID-vkCmdCopyImage-srcImage-01730"; skip |= LogError(src_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block " "depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).", i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth); } } // Compressed // Do all checks on dest image if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) { if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) { skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-00152", "vkCmdCopyImage(): pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images " "these must be 0 and 1, respectively.", i, region.dstOffset.y, dst_copy_extent.height); } } if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) { skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-01786", "vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 " "and 1, respectively.", i, region.dstOffset.z, dst_copy_extent.depth); } if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) { skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-01788", "vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.", i, region.dstOffset.z); } // Handle difference between Maintenance 1 if (device_extensions.vk_khr_maintenance1) { if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) { skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-04443", "vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount " "is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.", i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount); } } if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) { skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-04444", "vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount " "is %d. 
For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.", i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount); } } } else { // Pre maint 1 if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) { skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-00139", "vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and " "srcSubresource.layerCount is %d. For copies with either source or dest of type " "VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.", i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount); } if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) { skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-srcImage-00139", "vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and " "dstSubresource.layerCount is %d. For copies with either source or dest of type " "VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.", i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount); } } } // Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled) if (FormatIsCompressed(dst_state->createInfo.format) || (ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) { const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format); // image offsets must be multiples of block dimensions if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) || (SafeModulo(region.dstOffset.y, block_size.height) != 0) || (SafeModulo(region.dstOffset.z, block_size.depth) != 0)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01731" : "VUID-vkCmdCopyImage-dstImage-01731"; skip |= LogError(dst_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's " "texel width & height (%d, %d).", i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height); } const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource)); if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) && (dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01732" : "VUID-vkCmdCopyImage-dstImage-01732"; skip |= LogError( dst_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture " "block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).", i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width); } // Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) && (dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) { const char *vuid = ext_ycbcr ? 
"VUID-vkCmdCopyImage-dstImage-01733" : "VUID-vkCmdCopyImage-dstImage-01733"; skip |= LogError(dst_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed " "texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource " "height (%d).", i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height); } // Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth); if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) { const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01734" : "VUID-vkCmdCopyImage-dstImage-01734"; skip |= LogError( dst_state->image, vuid, "vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture " "block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).", i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth); } } // Compressed } return skip; } // vkCmdCopyImage checks that only apply if the multiplane extension is enabled bool CoreChecks::CopyImageMultiplaneValidation(VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state, const IMAGE_STATE *dst_image_state, const VkImageCopy region) const { bool skip = false; // Neither image is multiplane if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) { // If neither image is multi-plane the aspectMask member of src and dst must match if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) { std::stringstream ss; ss << "vkCmdCopyImage(): Copy between non-multiplane images with differing aspectMasks ( 0x" << std::hex << region.srcSubresource.aspectMask << " and 0x" << region.dstSubresource.aspectMask << " )"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01551", "%s.", ss.str().c_str()); } } else { // Source image multiplane checks uint32_t planes = FormatPlaneCount(src_image_state->createInfo.format); VkImageAspectFlags aspect = region.srcSubresource.aspectMask; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01552", "%s.", ss.str().c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01553", "%s.", ss.str().c_str()); } // Single-plane to multi-plane if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (FormatIsMultiplane(dst_image_state->createInfo.format)) && (VK_IMAGE_ASPECT_COLOR_BIT != aspect)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01557", "%s.", ss.str().c_str()); } // Dest image multiplane checks planes = 
FormatPlaneCount(dst_image_state->createInfo.format); aspect = region.dstSubresource.aspectMask; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01554", "%s.", ss.str().c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01555", "%s.", ss.str().c_str()); } // Multi-plane to single-plane if ((FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format)) && (VK_IMAGE_ASPECT_COLOR_BIT != aspect)) { std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT"; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01556", "%s.", ss.str().c_str()); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) const { const auto *cb_node = GetCBState(commandBuffer); const auto *src_image_state = GetImageState(srcImage); const auto *dst_image_state = GetImageState(dstImage); const VkFormat src_format = src_image_state->createInfo.format; const VkFormat dst_format = dst_image_state->createInfo.format; bool skip = false; skip = ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state); VkCommandBuffer command_buffer = cb_node->commandBuffer; for (uint32_t i = 0; i < regionCount; i++) { const VkImageCopy region = pRegions[i]; // For comp/uncomp copies, the copy extent for the dest image must be adjusted VkExtent3D src_copy_extent = region.extent; VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_format, dst_format, region.extent); bool slice_override = false; uint32_t depth_slices = 0; // Special case for copying between a 1D/2D array and a 3D image // TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up. 
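        // Note: as in ValidateImageCopyData() above, when exactly one side of the copy is a 3D image, the copied
        // depth comes from the 2D side's layerCount; slice_override makes the bounds checks below use that value in
        // place of extent.depth.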
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) { depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) { depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } skip |= ValidateImageSubresourceLayers(cb_node, &region.srcSubresource, "vkCmdCopyImage", "srcSubresource", i); skip |= ValidateImageSubresourceLayers(cb_node, &region.dstSubresource, "vkCmdCopyImage", "dstSubresource", i); skip |= ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, "vkCmdCopyImage", "srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01696"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, "vkCmdCopyImage", "dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01697"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount, i, "vkCmdCopyImage", "srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01698"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount, i, "vkCmdCopyImage", "dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01699"); if (device_extensions.vk_khr_maintenance1) { // No chance of mismatch if we're overriding depth slice count if (!slice_override) { // The number of depth slices in srcSubresource and dstSubresource must match // Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D uint32_t src_slices = (VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth : region.srcSubresource.layerCount); uint32_t dst_slices = (VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? 
dst_copy_extent.depth : region.dstSubresource.layerCount);

                if (src_slices != dst_slices) {
                    skip |= LogError(command_buffer, "VUID-VkImageCopy-extent-00140",
                                     "vkCmdCopyImage(): number of depth slices in source and destination subresources for "
                                     "pRegions[%u] do not match.",
                                     i);
                }
            }
        } else {
            // For each region the layerCount member of srcSubresource and dstSubresource must match
            if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
                skip |= LogError(
                    command_buffer, "VUID-VkImageCopy-layerCount-00138",
                    "vkCmdCopyImage(): number of layers in source and destination subresources for pRegions[%u] do not match",
                    i);
            }
        }

        // Do multiplane-specific checks, if extension enabled
        if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
            skip |= CopyImageMultiplaneValidation(command_buffer, src_image_state, dst_image_state, region);
        }
        if (!device_extensions.vk_khr_sampler_ycbcr_conversion) {
            // not multi-plane, the aspectMask member of srcSubresource and dstSubresource must match
            if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
                char const str[] = "vkCmdCopyImage(): Src and dest aspectMasks for each region must match";
                skip |= LogError(command_buffer, "VUID-VkImageCopy-aspectMask-00137", "%s.", str);
            }
        }

        // For each region, the aspectMask member of srcSubresource must be present in the source image
        if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_format)) {
            std::stringstream ss;
            ss << "vkCmdCopyImage(): pRegion[" << i
               << "] srcSubresource.aspectMask cannot specify aspects not present in source image";
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-aspectMask-00142", "%s.", ss.str().c_str());
        }

        // For each region, the aspectMask member of dstSubresource must be present in the destination image
        if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_format)) {
            std::stringstream ss;
            ss << "vkCmdCopyImage(): pRegion[" << i
               << "] dstSubresource.aspectMask cannot specify aspects not present in dest image";
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-aspectMask-00143", "%s.", ss.str().c_str());
        }

        // Each dimension offset + extent limits must fall within the image subresource extent
        VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
        if (slice_override) src_copy_extent.depth = depth_slices;
        uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
        if (extent_check & x_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00144",
                             "vkCmdCopyImage(): Source image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "width [%1d].",
                             i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
        }
        if (extent_check & y_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00145",
                             "vkCmdCopyImage(): Source image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "height [%1d].",
                             i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
        }
        if (extent_check & z_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00147",
                             "vkCmdCopyImage(): Source image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "depth [%1d].",
                             i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
        }

        // Adjust dest extent if necessary
        subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
        if (slice_override) dst_copy_extent.depth = depth_slices;

        extent_check =
ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
        if (extent_check & x_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00150",
                             "vkCmdCopyImage(): Dest image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "width [%1d].",
                             i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
        }
        if (extent_check & y_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00151",
                             "vkCmdCopyImage(): Dest image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "height [%1d].",
                             i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
        }
        if (extent_check & z_bit) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00153",
                             "vkCmdCopyImage(): Dest image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
                             "depth [%1d].",
                             i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
        }

        // The union of all source regions, and the union of all destination regions, specified by the elements of regions,
        // must not overlap in memory
        if (src_image_state->image == dst_image_state->image) {
            for (uint32_t j = 0; j < regionCount; j++) {
                if (RegionIntersects(&region, &pRegions[j], src_image_state->createInfo.imageType,
                                     FormatIsMultiplane(src_format))) {
                    std::stringstream ss;
                    ss << "vkCmdCopyImage(): pRegions[" << i << "] src overlaps with pRegions[" << j << "].";
                    skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-pRegions-00124", "%s.", ss.str().c_str());
                }
            }
        }

        // Check extent depth for 2D images: post-Maintenance1 this rule applies only when both images are 2D, while
        // pre-Maintenance1 it applied when either image was 2D
        if (device_extensions.vk_khr_maintenance1) {
            if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
                 (VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
                (src_copy_extent.depth != 1)) {
                skip |= LogError(
                    command_buffer, "VUID-vkCmdCopyImage-srcImage-01790",
                    "vkCmdCopyImage(): pRegion[%u] both srcImage and dstImage are 2D and extent.depth is %u but must be 1", i,
                    src_copy_extent.depth);
            }
        } else {
            if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) ||
                 (VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
                (src_copy_extent.depth != 1)) {
                skip |= LogError(
                    command_buffer, "VUID-vkCmdCopyImage-srcImage-01789",
                    "vkCmdCopyImage(): pRegion[%u] either srcImage or dstImage is 2D and extent.depth is %u but must be 1", i,
                    src_copy_extent.depth);
            }
        }

        // Check if 2D with 3D and depth not equal to 2D layerCount
        if ((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
            (VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
            (src_copy_extent.depth != region.srcSubresource.layerCount)) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01791",
                             "vkCmdCopyImage(): pRegion[%u] srcImage is 2D, dstImage is 3D and extent.depth is %u but must equal "
                             "srcSubresource.layerCount (%u)",
                             i, src_copy_extent.depth, region.srcSubresource.layerCount);
        } else if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
                   (VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType) &&
                   (src_copy_extent.depth != region.dstSubresource.layerCount)) {
            skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01792",
                             "vkCmdCopyImage(): pRegion[%u] srcImage is 3D, dstImage is 2D and extent.depth is %u but must equal "
                             "dstSubresource.layerCount (%u)",
                             i, src_copy_extent.depth, region.dstSubresource.layerCount);
        }

        // Check for multi-plane format compatibility
        if (FormatIsMultiplane(src_format) || FormatIsMultiplane(dst_format)) {
            size_t
src_format_size = 0;
            size_t dst_format_size = 0;
            if (FormatIsMultiplane(src_format)) {
                const VkFormat planeFormat = FindMultiplaneCompatibleFormat(src_format, region.srcSubresource.aspectMask);
                src_format_size = FormatElementSize(planeFormat);
            } else {
                src_format_size = FormatElementSize(src_format);
            }
            if (FormatIsMultiplane(dst_format)) {
                const VkFormat planeFormat = FindMultiplaneCompatibleFormat(dst_format, region.dstSubresource.aspectMask);
                dst_format_size = FormatElementSize(planeFormat);
            } else {
                dst_format_size = FormatElementSize(dst_format);
            }
            // If size is still zero, then format is invalid and will be caught in another VU
            if ((src_format_size != dst_format_size) && (src_format_size != 0) && (dst_format_size != 0)) {
                skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-None-01549",
                                 "vkCmdCopyImage(): pRegions[%u] called with non-compatible image formats. "
                                 "The src format %s with aspectMask %s is not compatible with dst format %s aspectMask %s.",
                                 i, string_VkFormat(src_format), string_VkImageAspectFlags(region.srcSubresource.aspectMask).c_str(),
                                 string_VkFormat(dst_format), string_VkImageAspectFlags(region.dstSubresource.aspectMask).c_str());
            }
        }
    }

    // The formats of non-multiplane src_image and dst_image must be compatible. Formats are considered compatible if their texel
    // size in bytes is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT
    // because both texels are 4 bytes in size.
    if (!FormatIsMultiplane(src_format) && !FormatIsMultiplane(dst_format)) {
        const char *compatible_vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyImage-srcImage-01548"
                                                                                          : "VUID-vkCmdCopyImage-srcImage-00135";
        // Depth/stencil formats must match exactly.
        if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
            if (src_format != dst_format) {
                skip |= LogError(command_buffer, compatible_vuid,
                                 "vkCmdCopyImage(): Depth/stencil formats must match exactly for src (%s) and dst (%s).",
                                 string_VkFormat(src_format), string_VkFormat(dst_format));
            }
        } else {
            if (!FormatSizesAreEqual(src_format, dst_format, regionCount, pRegions)) {
                skip |= LogError(command_buffer, compatible_vuid,
                                 "vkCmdCopyImage(): Unmatched image format sizes. 
" "The src format %s has size of %zu and dst format %s has size of %zu.", string_VkFormat(src_format), FormatElementSize(src_format), string_VkFormat(dst_format), FormatElementSize(dst_format)); } } } // Source and dest image sample counts must match if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) { char const str[] = "vkCmdCopyImage() called on image pair with non-identical sample counts."; skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-00136", "%s", str); } skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-00127"); skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-00132"); // Validate that SRC & DST images have correct usage flags set skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImage-srcImage-00126", "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImage-dstImage-00131", "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01825"); skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01826"); skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01827"); // Validation for VK_EXT_fragment_density_map if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) { skip |= LogError( command_buffer, "VUID-vkCmdCopyImage-dstImage-02542", "vkCmdCopyImage(): srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT"); } if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) { skip |= LogError( command_buffer, "VUID-vkCmdCopyImage-dstImage-02542", "vkCmdCopyImage(): dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT"); } if (device_extensions.vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-01995"); skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-01996"); } skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyImage()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdCopyImage-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()"); skip |= InsideRenderPass(cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass"); bool hit_error = false; const char *invalid_src_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ? "VUID-vkCmdCopyImage-srcImageLayout-01917" : "VUID-vkCmdCopyImage-srcImageLayout-00129"; const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ? 
"VUID-vkCmdCopyImage-dstImageLayout-01395" : "VUID-vkCmdCopyImage-dstImageLayout-00134"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImage()", invalid_src_layout_vuid, "VUID-vkCmdCopyImage-srcImageLayout-00128", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyImage()", invalid_dst_layout_vuid, "VUID-vkCmdCopyImage-dstImageLayout-00133", &hit_error); skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i, "vkCmdCopyImage()"); } return skip; } void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { StateTracker::PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); auto cb_node = GetCBState(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); // Make sure that all image slices are updated to correct layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout); SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout); } } // Returns true if sub_rect is entirely contained within rect static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) { if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) || (sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height)) return false; return true; } bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index, const FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment, const VkRect2D &render_area, uint32_t rect_count, const VkClearRect *clear_rects) const { bool skip = false; const IMAGE_VIEW_STATE *image_view_state = nullptr; if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) { image_view_state = GetAttachmentImageViewState(GetCBState(command_buffer), framebuffer, fb_attachment); } for (uint32_t j = 0; j < rect_count; j++) { if (!ContainsRect(render_area, clear_rects[j].rect)) { skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00016", "vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of " "the current render pass instance.", j); } if (image_view_state) { // The layers specified by a given element of pRects must be contained within every attachment that // pAttachments refers to const auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount; if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) || (clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) { skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00017", "vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers " "of pAttachment[%d].", j, attachment_index); } } } return skip; } bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t 
attachmentCount, const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                   const VkClearRect *pRects) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);  // TODO: Should be const, and never modified during validation
    if (!cb_node) return skip;

    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearAttachments()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdClearAttachments-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
    skip |= OutsideRenderPass(cb_node, "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-renderpass");

    // Validate that attachment is in reference list of active subpass
    if (cb_node->activeRenderPass) {
        const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
        const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount;
        const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
        const auto *framebuffer = cb_node->activeFramebuffer.get();
        const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea;

        for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
            auto clear_desc = &pAttachments[attachment_index];
            uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;

            if (0 == clear_desc->aspectMask) {
                skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-requiredbitmask",
                                 "vkCmdClearAttachments(): pAttachments[%u].aspectMask must not be 0.", attachment_index);
            } else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
                skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00020",
                                 "vkCmdClearAttachments(): pAttachments[%u].aspectMask must not include "
                                 "VK_IMAGE_ASPECT_METADATA_BIT.",
                                 attachment_index);
            } else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                uint32_t color_attachment = VK_ATTACHMENT_UNUSED;
                if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) {
                    color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
                    if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) {
                        skip |= LogError(
                            commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
                            "vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED "
                            "and not a valid attachment for %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.",
                            attachment_index, clear_desc->colorAttachment,
                            report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(), renderpass_attachment_count,
                            cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment);

                        color_attachment = VK_ATTACHMENT_UNUSED;  // Defensive, prevent lookup past end of renderpass attachment
                    }
                } else {
                    skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
                                     "vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for %s"
                                     " subpass %u. 
colorAttachmentCount=%u", attachment_index, clear_desc->colorAttachment, report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(), cb_node->activeSubpass, subpass_desc->colorAttachmentCount); } fb_attachment = color_attachment; if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) || (clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) { char const str[] = "vkCmdClearAttachments() aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment."; skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00019", str, attachment_index); } } else { // Must be depth and/or stencil if (((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) && ((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) { char const str[] = "vkCmdClearAttachments() aspectMask [%d] is not a valid combination of bits."; skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-parameter", str, attachment_index); } if (!subpass_desc->pDepthStencilAttachment || (subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) { skip |= LogPerformanceWarning( commandBuffer, kVUID_Core_DrawState_MissingAttachmentReference, "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored"); } else { fb_attachment = subpass_desc->pDepthStencilAttachment->attachment; } } if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area, rectCount, pRects); } // Once the framebuffer attachment is found, can get the image view state if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) { const IMAGE_VIEW_STATE *image_view_state = GetAttachmentImageViewState(GetCBState(commandBuffer), framebuffer, fb_attachment); if (image_view_state != nullptr) { skip |= ValidateProtectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-commandBuffer-02504"); skip |= ValidateUnprotectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-commandBuffer-02505"); } } } } return skip; } void CoreChecks::PreCallRecordCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) { auto *cb_node = GetCBState(commandBuffer); if (cb_node->activeRenderPass && (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) { const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr(); const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass]; std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy; for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) { const auto clear_desc = &pAttachments[attachment_index]; uint32_t fb_attachment = VK_ATTACHMENT_UNUSED; if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) && (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount)) { fb_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment; } else if ((clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) && subpass_desc->pDepthStencilAttachment) { fb_attachment = subpass_desc->pDepthStencilAttachment->attachment; } if 
(fb_attachment != VK_ATTACHMENT_UNUSED) {
                if (!clear_rect_copy) {
                    // We need a copy of the clear rectangles that will persist until the last lambda executes
                    // but we want to create it as lazily as possible
                    clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount));
                }
                // if a secondary level command buffer inherits the framebuffer from the primary command buffer
                // (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time
                auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
                                  const CMD_BUFFER_STATE *prim_cb, const FRAMEBUFFER_STATE *fb) {
                    assert(rectCount == clear_rect_copy->size());
                    const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
                    bool skip = false;
                    skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, fb, fb_attachment, render_area,
                                                         rectCount, clear_rect_copy->data());
                    return skip;
                };
                cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
            }
        }
    }
}

bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageResolve *pRegions) const {
    const auto *cb_node = GetCBState(commandBuffer);
    const auto *src_image_state = GetImageState(srcImage);
    const auto *dst_image_state = GetImageState(dstImage);

    bool skip = false;
    if (cb_node && src_image_state && dst_image_state) {
        skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-srcImage-00256");
        skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-00258");
        skip |= ValidateCmdQueueFlags(cb_node, "vkCmdResolveImage()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdResolveImage-commandBuffer-cmdpool");
        skip |= ValidateCmd(cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skip |= InsideRenderPass(cb_node, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-renderpass");
        skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, "vkCmdResolveImage()",
                                                "VUID-vkCmdResolveImage-dstImage-02003");
        skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdResolveImage()",
                                       "VUID-vkCmdResolveImage-commandBuffer-01837");
        skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdResolveImage()",
                                       "VUID-vkCmdResolveImage-commandBuffer-01838");
        skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdResolveImage()",
                                         "VUID-vkCmdResolveImage-commandBuffer-01839");

        // Validation for VK_EXT_fragment_density_map
        if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-dstImage-02546",
                             "vkCmdResolveImage(): srcImage must not have been created with flags containing "
                             "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
        }
        if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-dstImage-02546",
                             "vkCmdResolveImage(): dstImage must not have been created with flags containing "
                             "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
        }

        bool hit_error = false;
        const char *invalid_src_layout_vuid =
            (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
                ? "VUID-vkCmdResolveImage-srcImageLayout-01400"
                : "VUID-vkCmdResolveImage-srcImageLayout-00261";
        const char *invalid_dst_layout_vuid =
            (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ?
"VUID-vkCmdResolveImage-dstImageLayout-01401" : "VUID-vkCmdResolveImage-dstImageLayout-00263"; // For each region, the number of layers in the image subresource should not be zero // For each region, src and dest image aspect must be color only for (uint32_t i = 0; i < regionCount; i++) { const VkImageResolve region = pRegions[i]; const VkImageSubresourceLayers src_subresource = region.srcSubresource; const VkImageSubresourceLayers dst_subresource = region.dstSubresource; skip |= ValidateImageSubresourceLayers(cb_node, &src_subresource, "vkCmdResolveImage()", "srcSubresource", i); skip |= ValidateImageSubresourceLayers(cb_node, &dst_subresource, "vkCmdResolveImage()", "dstSubresource", i); skip |= VerifyImageLayout(cb_node, src_image_state, src_subresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdResolveImage()", invalid_src_layout_vuid, "VUID-vkCmdResolveImage-srcImageLayout-00260", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, dst_subresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdResolveImage()", invalid_dst_layout_vuid, "VUID-vkCmdResolveImage-dstImageLayout-00262", &hit_error); skip |= ValidateImageMipLevel(cb_node, src_image_state, src_subresource.mipLevel, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01709"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, dst_subresource.mipLevel, i, "vkCmdResolveImage()", "dstSubresource", "VUID-vkCmdResolveImage-dstSubresource-01710"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, src_subresource.baseArrayLayer, src_subresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01711"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, dst_subresource.baseArrayLayer, dst_subresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-dstSubresource-01712"); // layer counts must match if (src_subresource.layerCount != dst_subresource.layerCount) { skip |= LogError( cb_node->commandBuffer, "VUID-VkImageResolve-layerCount-00267", "vkCmdResolveImage(): layerCount in source and destination subresource of pRegions[%u] does not match.", i); } // For each region, src and dest image aspect must be color only if ((src_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) || (dst_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) { skip |= LogError( cb_node->commandBuffer, "VUID-VkImageResolve-aspectMask-00266", "vkCmdResolveImage(): src and dest aspectMasks for pRegions[%u] must specify only VK_IMAGE_ASPECT_COLOR_BIT.", i); } const VkImageType src_image_type = src_image_state->createInfo.imageType; const VkImageType dst_image_type = dst_image_state->createInfo.imageType; if ((VK_IMAGE_TYPE_3D == src_image_type) || (VK_IMAGE_TYPE_3D == dst_image_type)) { if ((0 != src_subresource.baseArrayLayer) || (1 != src_subresource.layerCount)) { LogObjectList objlist(cb_node->commandBuffer); objlist.add(src_image_state->image); objlist.add(dst_image_state->image); skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-04446", "vkCmdResolveImage(): pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all " "subresources if the src or dst image is 3D.", i); } if ((0 != dst_subresource.baseArrayLayer) || (1 != dst_subresource.layerCount)) { LogObjectList objlist(cb_node->commandBuffer); objlist.add(src_image_state->image); objlist.add(dst_image_state->image); skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-04447", "vkCmdResolveImage(): 
pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
                                     "subresources if the src or dst image is 3D.",
                                     i);
                }
            }

            if (VK_IMAGE_TYPE_1D == src_image_type) {
                if ((pRegions[i].srcOffset.y != 0) || (pRegions[i].extent.height != 1)) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(src_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-00271",
                                     "vkCmdResolveImage(): srcImage (%s) is 1D but pRegions[%u] srcOffset.y (%d) is not 0 or "
                                     "extent.height (%u) is not 1.",
                                     report_data->FormatHandle(src_image_state->image).c_str(), i, pRegions[i].srcOffset.y,
                                     pRegions[i].extent.height);
                }
            }
            if ((VK_IMAGE_TYPE_1D == src_image_type) || (VK_IMAGE_TYPE_2D == src_image_type)) {
                if ((pRegions[i].srcOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(src_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-00273",
                                     "vkCmdResolveImage(): srcImage (%s) is 1D or 2D but pRegions[%u] srcOffset.z (%d) is not 0 or "
                                     "extent.depth (%u) is not 1.",
                                     report_data->FormatHandle(src_image_state->image).c_str(), i, pRegions[i].srcOffset.z,
                                     pRegions[i].extent.depth);
                }
            }

            if (VK_IMAGE_TYPE_1D == dst_image_type) {
                if ((pRegions[i].dstOffset.y != 0) || (pRegions[i].extent.height != 1)) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(dst_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstImage-00276",
                                     "vkCmdResolveImage(): dstImage (%s) is 1D but pRegions[%u] dstOffset.y (%d) is not 0 or "
                                     "extent.height (%u) is not 1.",
                                     report_data->FormatHandle(dst_image_state->image).c_str(), i, pRegions[i].dstOffset.y,
                                     pRegions[i].extent.height);
                }
            }
            if ((VK_IMAGE_TYPE_1D == dst_image_type) || (VK_IMAGE_TYPE_2D == dst_image_type)) {
                if ((pRegions[i].dstOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(dst_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstImage-00278",
                                     "vkCmdResolveImage(): dstImage (%s) is 1D or 2D but pRegions[%u] dstOffset.z (%d) is not 0 or "
                                     "extent.depth (%u) is not 1.",
                                     report_data->FormatHandle(dst_image_state->image).c_str(), i, pRegions[i].dstOffset.z,
                                     pRegions[i].extent.depth);
                }
            }

            // Each srcImage dimension offset + extent limits must fall within the image subresource extent
            VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &src_subresource);
            // MipLevel bound is checked already and adding extra errors with a "subresource extent of zero" is confusing to
            // developer
            if (src_subresource.mipLevel < src_image_state->createInfo.mipLevels) {
                uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &(region.extent), &subresource_extent);
                if ((extent_check & x_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(src_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00269",
                                     "vkCmdResolveImage(): srcImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource width [%u].",
                                     report_data->FormatHandle(src_image_state->image).c_str(), i, region.srcOffset.x,
                                     region.extent.width, subresource_extent.width);
                }
                if ((extent_check & y_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(src_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00270",
                                     "vkCmdResolveImage(): srcImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource height [%u].",
                                     report_data->FormatHandle(src_image_state->image).c_str(), i, 
region.srcOffset.y, region.extent.height, subresource_extent.height);
                }
                if ((extent_check & z_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(src_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00272",
                                     "vkCmdResolveImage(): srcImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource depth [%u].",
                                     report_data->FormatHandle(src_image_state->image).c_str(), i, region.srcOffset.z,
                                     region.extent.depth, subresource_extent.depth);
                }
            }

            // Each dstImage dimension offset + extent limits must fall within the image subresource extent
            subresource_extent = GetImageSubresourceExtent(dst_image_state, &dst_subresource);
            // MipLevel bound is checked already and adding extra errors with a "subresource extent of zero" is confusing to
            // developer
            if (dst_subresource.mipLevel < dst_image_state->createInfo.mipLevels) {
                uint32_t extent_check = ExceedsBounds(&(region.dstOffset), &(region.extent), &subresource_extent);
                if ((extent_check & x_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(dst_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00274",
                                     "vkCmdResolveImage(): dstImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource width [%u].",
                                     report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.x,
                                     region.extent.width, subresource_extent.width);
                }
                if ((extent_check & y_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(dst_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00275",
                                     "vkCmdResolveImage(): dstImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource height [%u].",
                                     report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.y,
                                     region.extent.height, subresource_extent.height);
                }
                if ((extent_check & z_bit) != 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(dst_image_state->image);
                    skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00277",
                                     "vkCmdResolveImage(): dstImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
                                     "exceeds subResource depth [%u].",
                                     report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.z,
                                     region.extent.depth, subresource_extent.depth);
                }
            }
        }

        if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-srcImage-01386",
                             "vkCmdResolveImage(): srcImage format (%s) and dstImage format (%s) are not the same.",
                             string_VkFormat(src_image_state->createInfo.format),
                             string_VkFormat(dst_image_state->createInfo.format));
        }
        if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
            skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_MismatchedImageType,
                               "vkCmdResolveImage(): srcImage type (%s) and dstImage type (%s) are not the same.",
                               string_VkImageType(src_image_state->createInfo.imageType),
                               string_VkImageType(dst_image_state->createInfo.imageType));
        }
        if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-srcImage-00257",
                             "vkCmdResolveImage(): srcImage sample count is VK_SAMPLE_COUNT_1_BIT.");
        }
        if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-dstImage-00259",
                             "vkCmdResolveImage(): dstImage sample count (%s) is not VK_SAMPLE_COUNT_1_BIT.", 
string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
        }
    } else {
        assert(0);
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                             VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                             const VkImageBlit *pRegions, VkFilter filter) const {
    const auto *cb_node = GetCBState(commandBuffer);
    const auto *src_image_state = GetImageState(srcImage);
    const auto *dst_image_state = GetImageState(dstImage);

    bool skip = false;
    if (cb_node) {
        skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
    }
    if (cb_node && src_image_state && dst_image_state) {
        skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
                                         "VUID-vkCmdBlitImage-srcImage-00233");
        skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
                                         "VUID-vkCmdBlitImage-dstImage-00234");
        skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-00220");
        skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-00225");
        skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                        "VUID-vkCmdBlitImage-srcImage-00219", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                        "VUID-vkCmdBlitImage-dstImage-00224", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBlitImage()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBlitImage-commandBuffer-cmdpool");
        skip |= InsideRenderPass(cb_node, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-renderpass");
        skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, "vkCmdBlitImage()",
                                                "VUID-vkCmdBlitImage-srcImage-01999");
        skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, "vkCmdBlitImage()",
                                                "VUID-vkCmdBlitImage-dstImage-02000");
        skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01834");
        skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01835");
        skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01836");

        // Validation for VK_EXT_fragment_density_map
        if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
            skip |= LogError(
                cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-02544",
                "vkCmdBlitImage(): srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
        }
        if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
            skip |= LogError(
                cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-02545",
                "vkCmdBlitImage(): dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
        }

        // TODO: Need to validate image layouts, which will include layout validation for shared presentable images

        VkFormat src_format = src_image_state->createInfo.format;
        VkFormat dst_format = dst_image_state->createInfo.format;
        VkImageType src_type = src_image_state->createInfo.imageType;
        VkImageType dst_type = dst_image_state->createInfo.imageType;

        if (VK_FILTER_LINEAR == filter) {
            skip |= ValidateImageFormatFeatureFlags(src_image_state, 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, "vkCmdBlitImage()",
                                                    "VUID-vkCmdBlitImage-filter-02001");
        } else if (VK_FILTER_CUBIC_IMG == filter) {
            skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
                                                    "vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02002");
        }

        if (FormatRequiresYcbcrConversion(src_format)) {
            skip |= LogError(device, "VUID-vkCmdBlitImage-srcImage-01561",
                             "vkCmdBlitImage(): srcImage format (%s) must not be one of the formats requiring sampler YCBCR "
                             "conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
                             string_VkFormat(src_format));
        }
        if (FormatRequiresYcbcrConversion(dst_format)) {
            skip |= LogError(device, "VUID-vkCmdBlitImage-dstImage-01562",
                             "vkCmdBlitImage(): dstImage format (%s) must not be one of the formats requiring sampler YCBCR "
                             "conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
                             string_VkFormat(dst_format));
        }

        if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_2D != src_type)) {
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-filter-00237",
                             "vkCmdBlitImage(): source image type must be VK_IMAGE_TYPE_2D when cubic filtering is specified.");
        }

        // Validate consistency for unsigned formats
        if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
            std::stringstream ss;
            ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has unsigned integer format, "
               << "the other one must also have unsigned integer format. "
               << "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00230", "%s.", ss.str().c_str());
        }

        // Validate consistency for signed formats
        if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
            std::stringstream ss;
            ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has signed integer format, "
               << "the other one must also have signed integer format. "
               << "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00229", "%s.", ss.str().c_str());
        }

        // Validate filter for Depth/Stencil formats
        if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
            std::stringstream ss;
            ss << "vkCmdBlitImage(): If the format of srcImage is a depth, stencil, or depth stencil format, "
               << "then filter must be VK_FILTER_NEAREST.";
            skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00232", "%s.", ss.str().c_str());
        }

        // Validate aspect bits and formats for depth/stencil images
        if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
            if (src_format != dst_format) {
                std::stringstream ss;
                ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has a format of depth, stencil or depth "
                   << "stencil, the other one must have exactly the same format. "
                   << "Source format is " << string_VkFormat(src_format) << " Destination format is "
                   << string_VkFormat(dst_format);
                skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00231", "%s.", ss.str().c_str());
            }
        }  // Depth or Stencil

        // Do per-region checks
        const char *invalid_src_layout_vuid =
            (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
                ? "VUID-vkCmdBlitImage-srcImageLayout-01398"
                : "VUID-vkCmdBlitImage-srcImageLayout-00222";
        const char *invalid_dst_layout_vuid =
            (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ? 
"VUID-vkCmdBlitImage-dstImageLayout-01399" : "VUID-vkCmdBlitImage-dstImageLayout-00227"; for (uint32_t i = 0; i < regionCount; i++) { const VkImageBlit rgn = pRegions[i]; bool hit_error = false; skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdBlitImage()", invalid_src_layout_vuid, "VUID-vkCmdBlitImage-srcImageLayout-00221", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdBlitImage()", invalid_dst_layout_vuid, "VUID-vkCmdBlitImage-dstImageLayout-00226", &hit_error); skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, "vkCmdBlitImage()", "srcSubresource", i); skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, "vkCmdBlitImage()", "dstSubresource", i); skip |= ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, "vkCmdBlitImage()", "srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01705"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, "vkCmdBlitImage()", "dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01706"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer, rgn.srcSubresource.layerCount, i, "vkCmdBlitImage()", "srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01707"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer, rgn.dstSubresource.layerCount, i, "vkCmdBlitImage()", "dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01708"); // Warn for zero-sized regions if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) || (rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) { std::stringstream ss; ss << "vkCmdBlitImage(): pRegions[" << i << "].srcOffsets specify a zero-volume area."; skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str()); } if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) || (rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) { std::stringstream ss; ss << "vkCmdBlitImage(): pRegions[" << i << "].dstOffsets specify a zero-volume area."; skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str()); } // Check that src/dst layercounts match if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) { skip |= LogError( cb_node->commandBuffer, "VUID-VkImageBlit-layerCount-00239", "vkCmdBlitImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i); } if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) { skip |= LogError(cb_node->commandBuffer, "VUID-VkImageBlit-aspectMask-00238", "vkCmdBlitImage(): aspectMask members for pRegion[%d] do not match.", i); } if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-aspectMask-00241", "vkCmdBlitImage(): region [%d] source aspectMask (0x%x) specifies aspects not present in source " "image format %s.", i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format)); } if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) { skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-aspectMask-00242", "vkCmdBlitImage(): region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.", i, 
rgn.dstSubresource.aspectMask, string_VkFormat(dst_format)); } // Validate source image offsets VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource)); if (VK_IMAGE_TYPE_1D == src_type) { if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00245", "vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values " "of (%1d, %1d). These must be (0, 1).", i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y); } } if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) { if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00247", "vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with " "srcOffset[].z values of (%1d, %1d). These must be (0, 1).", i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z); } } bool oob = false; if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) || (rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00243", "vkCmdBlitImage(): region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).", i, rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width); } if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) || (rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00244", "vkCmdBlitImage(): region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).", i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height); } if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) || (rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00246", "vkCmdBlitImage(): region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).", i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth); } if (oob) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-pRegions-00215", "vkCmdBlitImage(): region [%d] source image blit region exceeds image dimensions.", i); } // Validate dest image offsets VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource)); if (VK_IMAGE_TYPE_1D == dst_type) { if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-00250", "vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of " "(%1d, %1d). These must be (0, 1).", i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y); } } if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) { if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-00252", "vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with " "dstOffset[].z values of (%1d, %1d). 
These must be (0, 1).", i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z); } } oob = false; if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) || (rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00248", "vkCmdBlitImage(): region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).", i, rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width); } if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) || (rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00249", "vkCmdBlitImage(): region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).", i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height); } if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) || (rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) { oob = true; skip |= LogError( cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00251", "vkCmdBlitImage(): region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).", i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth); } if (oob) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-pRegions-00216", "vkCmdBlitImage(): region [%d] destination image blit region exceeds image dimensions.", i); } if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) { if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) || (0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) { skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00240", "vkCmdBlitImage(): region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a " "layerCount other than 1.", i); } } } // per-region checks } else { assert(0); } return skip; } void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) { StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); auto cb_node = GetCBState(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); // Make sure that all image slices are updated to correct layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout); SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout); } } GlobalImageLayoutRangeMap *GetLayoutRangeMap(GlobalImageLayoutMap *map, const IMAGE_STATE &image_state) { assert(map); // This approach allows for a single hash lookup or/create new auto inserted = map->emplace(std::make_pair(image_state.image, nullptr)); if (inserted.second) { assert(nullptr == inserted.first->second.get()); GlobalImageLayoutRangeMap *layout_map = new GlobalImageLayoutRangeMap(image_state.subresource_encoder.SubresourceCount()); inserted.first->second.reset(layout_map); return layout_map; } else { assert(nullptr != 
inserted.first->second.get());
        return inserted.first->second.get();
    }
}

const GlobalImageLayoutRangeMap *GetLayoutRangeMap(const GlobalImageLayoutMap &map, VkImage image) {
    auto it = map.find(image);
    if (it != map.end()) {
        return it->second.get();
    }
    return nullptr;
}

// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(const CMD_BUFFER_STATE *pCB, const GlobalImageLayoutMap &globalImageLayoutMap,
                                            GlobalImageLayoutMap *overlayLayoutMap_arg) const {
    if (disabled[image_layout_validation]) return false;
    bool skip = false;
    GlobalImageLayoutMap &overlayLayoutMap = *overlayLayoutMap_arg;
    // Iterate over the layout maps for each referenced image
    GlobalImageLayoutRangeMap empty_map(1);
    for (const auto &layout_map_entry : pCB->image_layout_map) {
        const auto image = layout_map_entry.first;
        const auto *image_state = GetImageState(image);
        if (!image_state) continue;  // Can't check layouts of a dead image
        const auto &subres_map = layout_map_entry.second;
        const auto &initial_layout_map = subres_map->GetInitialLayoutMap();
        // Validate the initial_uses for each subresource referenced
        if (initial_layout_map.empty()) continue;

        auto *overlay_map = GetLayoutRangeMap(&overlayLayoutMap, *image_state);
        const auto *global_map = GetLayoutRangeMap(globalImageLayoutMap, image);
        if (global_map == nullptr) {
            global_map = &empty_map;
        }

        // Note: don't know if it would matter
        // if (global_map->empty() && overlay_map->empty()) // skip this next loop...;

        auto pos = initial_layout_map.begin();
        const auto end = initial_layout_map.end();
        sparse_container::parallel_iterator<const ImageSubresourceLayoutMap::LayoutMap> current_layout(*overlay_map, *global_map,
                                                                                                       pos->first.begin);
        while (pos != end) {
            VkImageLayout initial_layout = pos->second;
            VkImageLayout image_layout = kInvalidLayout;

            if (current_layout->range.empty()) break;  // When we are past the end of data in overlay and global... stop looking
            if (current_layout->pos_A->valid) {  // pos_A denotes the overlay map in the parallel iterator
                image_layout = current_layout->pos_A->lower_bound->second;
            } else if (current_layout->pos_B->valid) {  // pos_B denotes the global map in the parallel iterator
                image_layout = current_layout->pos_B->lower_bound->second;
            }
            const auto intersected_range = pos->first & current_layout->range;
            if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (image_layout != initial_layout) {
                // Need to look up the initial layout *state* to get a bit more information
                const auto *initial_layout_state = subres_map->GetSubresourceInitialLayoutState(pos->first.begin);
                assert(initial_layout_state);  // There's no way we should have an initial layout without matching state...
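                // Illustrative example (hypothetical values, not from the spec text): a command buffer recorded
                // expecting mip 0 / layer 0 of the color aspect to start in VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                // but submitted while the tracked global layout of that subresource is VK_IMAGE_LAYOUT_GENERAL,
                // fails the aspect-aware comparison below, and every subresource in the intersected range is reported.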
bool matches = ImageLayoutMatches(initial_layout_state->aspect_mask, image_layout, initial_layout); if (!matches) { // We can report all the errors for the intersected range directly for (auto index : sparse_container::range_view<decltype(intersected_range)>(intersected_range)) { const auto subresource = image_state->subresource_encoder.Decode(index); skip |= LogError( pCB->commandBuffer, kVUID_Core_DrawState_InvalidImageLayout, "Submitted command buffer expects %s (subresource: aspectMask 0x%X array layer %u, mip level %u) " "to be in layout %s--instead, current layout is %s.", report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(initial_layout), string_VkImageLayout(image_layout)); } } } if (pos->first.includes(intersected_range.end)) { current_layout.seek(intersected_range.end); } else { ++pos; if (pos != end) { current_layout.seek(pos->first.begin); } } } // Update all layout set operations (which will be a subset of the initial_layouts) sparse_container::splice(overlay_map, subres_map->GetCurrentLayoutMap(), sparse_container::value_precedence::prefer_source); } return skip; } void CoreChecks::UpdateCmdBufImageLayouts(CMD_BUFFER_STATE *pCB) { for (const auto &layout_map_entry : pCB->image_layout_map) { const auto image = layout_map_entry.first; const auto &subres_map = layout_map_entry.second; const auto *image_state = GetImageState(image); if (!image_state) continue; // Can't set layouts of a dead image auto *global_map = GetLayoutRangeMap(&imageLayoutMap, *image_state); sparse_container::splice(global_map, subres_map->GetCurrentLayoutMap(), sparse_container::value_precedence::prefer_source); } } // ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the // VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY // layout attachments don't have CLEAR as their loadOp. 
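// A minimal sketch of an attachment the checks below reject (illustrative values, not from the spec text):
//     VkAttachmentDescription2KHR desc = {VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR};
//     desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     // first_layout, derived from the attachment's first use in a subpass:
//     //     VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
// A load-time clear writes the attachment, which a read-only first layout cannot permit.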
bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
                                                       const VkImageLayout first_layout, const uint32_t attachment,
                                                       const VkAttachmentDescription2KHR &attachment_description) const {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);

    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    // For both loadOp and stencilLoadOp, rp2 covers this in one VU while rp1 uses two VUs, half of them behind the
    // Maintenance2 extension. The VUIDs appear below in the following order: rp2 -> rp1 with Maintenance2 -> rp1 with no extension
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
                        (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
                        (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02522",
                             "vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        } else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
                   (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL)) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01566",
                             "vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        } else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
                                          (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-00836",
                             "vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        }
    }

    // Same as above for loadOp, but for stencilLoadOp
    if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
                        (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
                        (first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL))) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02523",
                             "vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        } else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
                   (first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01567",
                             "vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        } else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
                                          (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-02511",
                             "vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
                             string_VkImageLayout(first_layout));
        }
    }

    return skip;
}

// Helper function to validate correct usage bits set for buffers or images. 
Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
template <typename T1>
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, const T1 object,
                                    const VulkanTypedHandle &typed_handle, const char *msgCode, char const *func_name,
                                    char const *usage_str) const {
    bool correct_usage = false;
    bool skip = false;
    const char *type_str = object_string[typed_handle.type];
    if (strict) {
        correct_usage = ((actual & desired) == desired);
    } else {
        correct_usage = ((actual & desired) != 0);
    }

    if (!correct_usage) {
        // All callers should have a valid VUID
        assert(msgCode != kVUIDUndefined);
        skip =
            LogError(object, msgCode, "Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
                     report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
    }
    return skip;
}

// Helper function to validate usage flags for images. For given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
                                         char const *func_name, char const *usage_string) const {
    return ValidateUsageFlags(image_state->createInfo.usage, desired, strict, image_state->image,
                              VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), msgCode, func_name, usage_string);
}

bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
                                                 char const *func_name, const char *vuid) const {
    bool skip = false;
    const VkFormatFeatureFlags image_format_features = image_state->format_features;
    if ((image_format_features & desired) != desired) {
        // Same error, but more details if it was an AHB external format
        if (image_state->has_ahb_format == true) {
            skip |= LogError(image_state->image, vuid,
                             "In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for the external format "
                             "found in VkAndroidHardwareBufferFormatPropertiesANDROID::formatFeatures used by %s.",
                             func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
                             report_data->FormatHandle(image_state->image).c_str());
        } else {
            skip |= LogError(image_state->image, vuid,
                             "In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for format %u used by %s "
                             "with tiling %s.",
                             func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
                             image_state->createInfo.format, report_data->FormatHandle(image_state->image).c_str(),
                             string_VkImageTiling(image_state->createInfo.tiling));
        }
    }
    return skip;
}

bool CoreChecks::ValidateImageSubresourceLayers(const CMD_BUFFER_STATE *cb_node,
                                                const VkImageSubresourceLayers *subresource_layers, char const *func_name,
                                                char const *member, uint32_t i) const {
    bool skip = false;
    // layerCount must not be zero
    if (subresource_layers->layerCount == 0) {
        skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-layerCount-01700",
                         "In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
    }
    // aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
    if (subresource_layers->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
        skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-aspectMask-00168",
                         "In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
    }
    // if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
    if
((subresource_layers->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) && (subresource_layers->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) { skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-aspectMask-00167", "In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or " "VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name, i, member); } return skip; } // Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above // where an error will be flagged if usage is not correct bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode, char const *func_name, char const *usage_string) const { return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict, buffer_state->buffer, VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), msgCode, func_name, usage_string); } bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo, const VkPhysicalDeviceLimits *device_limits) const { bool skip = false; const VkDeviceSize &range = pCreateInfo->range; if (range != VK_WHOLE_SIZE) { // Range must be greater than 0 if (range <= 0) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00928", "vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64 ") does not equal VK_WHOLE_SIZE, range must be greater than 0.", range); } // Range must be a multiple of the element size of format const uint32_t format_size = FormatElementSize(pCreateInfo->format); if (SafeModulo(range, format_size) != 0) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00929", "vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64 ") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format " "(%" PRIu32 ").", range, format_size); } // Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements if (SafeDivision(range, format_size) > device_limits->maxTexelBufferElements) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00930", "vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64 ") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (%" PRIu32 ") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").", range, format_size, device_limits->maxTexelBufferElements); } // The sum of range and offset must be less than or equal to the size of buffer if (range + pCreateInfo->offset > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-offset-00931", "vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64 ") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64 ") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").", range, pCreateInfo->offset, buffer_state->createInfo.size); } } else { const uint32_t format_size = FormatElementSize(pCreateInfo->format); // Size of buffer - offset, divided by the element size of format must be less than or equal to // VkPhysicalDeviceLimits::maxTexelBufferElements if (SafeDivision(buffer_state->createInfo.size - pCreateInfo->offset, format_size) > device_limits->maxTexelBufferElements) { skip |= LogError(buffer_state->buffer, 
"VUID-VkBufferViewCreateInfo-range-04059", "vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64 ") equals VK_WHOLE_SIZE, the buffer's size (%" PRIuLEAST64 ") minus the offset (%" PRIuLEAST64 "), divided by the element size of the format (%" PRIu32 ") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").", range, buffer_state->createInfo.size, pCreateInfo->offset, format_size, device_limits->maxTexelBufferElements); } } return skip; } bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) const { bool skip = false; const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format); if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) && !(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-00933", "vkCreateBufferView(): If buffer was created with `usage` containing " "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format must " "be supported for uniform texel buffers"); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) && !(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-00934", "vkCreateBufferView(): If buffer was created with `usage` containing " "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format must " "be supported for storage texel buffers"); } return skip; } bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) const { bool skip = false; // TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting) auto chained_devaddr_struct = lvl_find_in_chain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext); if (chained_devaddr_struct) { if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) && chained_devaddr_struct->deviceAddress != 0) { skip |= LogError(device, "VUID-VkBufferCreateInfo-deviceAddress-02604", "vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress " "requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT."); } } auto chained_opaqueaddr_struct = lvl_find_in_chain<VkBufferOpaqueCaptureAddressCreateInfoKHR>(pCreateInfo->pNext); if (chained_opaqueaddr_struct) { if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR) && chained_opaqueaddr_struct->opaqueCaptureAddress != 0) { skip |= LogError(device, "VUID-VkBufferCreateInfo-opaqueCaptureAddress-03337", "vkCreateBuffer(): Non-zero VkBufferOpaqueCaptureAddressCreateInfoKHR::opaqueCaptureAddress" "requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR."); } } if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR) && !enabled_features.core12.bufferDeviceAddressCaptureReplay && !enabled_features.buffer_device_address_ext.bufferDeviceAddressCaptureReplay) { skip |= LogError( device, "VUID-VkBufferCreateInfo-flags-03338", "vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with " "the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT set."); } if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) { skip |= 
ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices", "VUID-VkBufferCreateInfo-sharingMode-01419"); } if ((pCreateInfo->flags & VK_BUFFER_CREATE_PROTECTED_BIT) != 0) { if (enabled_features.core11.protectedMemory == VK_FALSE) { skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-01887", "vkCreateBuffer(): the protectedMemory device feature is disabled: Buffers cannot be created with the " "VK_BUFFER_CREATE_PROTECTED_BIT set."); } const VkBufferCreateFlags invalid_flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT; if ((pCreateInfo->flags & invalid_flags) != 0) { skip |= LogError(device, "VUID-VkBufferCreateInfo-None-01888", "vkCreateBuffer(): VK_BUFFER_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at " "same time (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | " "VK_BUFFER_CREATE_SPARSE_ALIASED_BIT)."); } } return skip; } bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBufferView *pView) const { bool skip = false; const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer); // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time if (buffer_state) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935"); // In order to create a valid buffer view, the buffer must have been created with at least one of the following flags: // UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false, "VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT"); // Buffer view offset must be less than the size of buffer if (pCreateInfo->offset >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-offset-00925", "vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64 ") must be less than the size of the buffer (%" PRIuLEAST64 ").", pCreateInfo->offset, buffer_state->createInfo.size); } const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits; // Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0 && !enabled_features.texel_buffer_alignment_features.texelBufferAlignment) { const char *vuid = device_extensions.vk_ext_texel_buffer_alignment ? 
"VUID-VkBufferViewCreateInfo-offset-02749" : "VUID-VkBufferViewCreateInfo-offset-00926"; skip |= LogError(buffer_state->buffer, vuid, "vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64 ") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").", pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment); } if (enabled_features.texel_buffer_alignment_features.texelBufferAlignment) { VkDeviceSize elementSize = FormatElementSize(pCreateInfo->format); if ((elementSize % 3) == 0) { elementSize /= 3; } if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) { VkDeviceSize alignmentRequirement = phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes; if (phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment) { alignmentRequirement = std::min(alignmentRequirement, elementSize); } if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) { skip |= LogError( buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-02750", "vkCreateBufferView(): If buffer was created with usage containing " "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, " "VkBufferViewCreateInfo offset (%" PRIuLEAST64 ") must be a multiple of the lesser of " "VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64 ") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetSingleTexelAlignment " "(%" PRId32 ") is VK_TRUE, the size of a texel of the requested format. " "If the size of a texel is a multiple of three bytes, then the size of a " "single component of format is used instead", pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment); } } if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) { VkDeviceSize alignmentRequirement = phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes; if (phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment) { alignmentRequirement = std::min(alignmentRequirement, elementSize); } if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) { skip |= LogError( buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-02751", "vkCreateBufferView(): If buffer was created with usage containing " "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, " "VkBufferViewCreateInfo offset (%" PRIuLEAST64 ") must be a multiple of the lesser of " "VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64 ") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetSingleTexelAlignment " "(%" PRId32 ") is VK_TRUE, the size of a texel of the requested format. 
" "If the size of a texel is a multiple of three bytes, then the size of a " "single component of format is used instead", pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment); } } } skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits); skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo); } return skip; } // For the given format verify that the aspect masks make sense bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name, const char *vuid) const { bool skip = false; const IMAGE_STATE *image_state = GetImageState(image); // checks color format and (single-plane or non-disjoint) // if ycbcr extension is not supported then single-plane and non-disjoint are always both true if ((FormatIsColor(format)) && ((FormatIsMultiplane(format) == false) || (image_state->disjoint == false))) { if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { skip |= LogError(image, vuid, "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name); } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) { skip |= LogError(image, vuid, "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name); } } else if (FormatIsDepthAndStencil(format)) { if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) { skip |= LogError(image, vuid, "%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and " "VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name); } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) { skip |= LogError(image, vuid, "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and " "VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name); } } else if (FormatIsDepthOnly(format)) { if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) { skip |= LogError(image, vuid, "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name); } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) { skip |= LogError(image, vuid, "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name); } } else if (FormatIsStencilOnly(format)) { if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) { skip |= LogError(image, vuid, "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name); } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) { skip |= LogError(image, vuid, "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name); } } else if (FormatIsMultiplane(format)) { VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT; if (3 == FormatPlaneCount(format)) { valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT; } if ((aspect_mask & valid_flags) != aspect_mask) { skip |= LogError(image, vuid, "%s: Multi-plane image formats may have only VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs " "set, where n = [0, 1, 2].", func_name); } } return skip; } bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count, const VkImageSubresourceRange &subresourceRange, 
const char *cmd_name, const char *param_name, const char *image_layer_count_var_name, const VkImage image, SubresourceRangeErrorCodes errorCodes) const { bool skip = false; // Validate mip levels if (subresourceRange.baseMipLevel >= image_mip_count) { skip |= LogError(image, errorCodes.base_mip_err, "%s: %s.baseMipLevel (= %" PRIu32 ") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").", cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count); } if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) { if (subresourceRange.levelCount == 0) { skip |= LogError(image, "VUID-VkImageSubresourceRange-levelCount-01720", "%s: %s.levelCount is 0.", cmd_name, param_name); } else { const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount}; if (necessary_mip_count > image_mip_count) { skip |= LogError(image, errorCodes.mip_count_err, "%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").", cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount, necessary_mip_count, image_mip_count); } } } // Validate array layers if (subresourceRange.baseArrayLayer >= image_layer_count) { skip |= LogError(image, errorCodes.base_layer_err, "%s: %s.baseArrayLayer (= %" PRIu32 ") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").", cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count); } if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) { if (subresourceRange.layerCount == 0) { skip |= LogError(image, "VUID-VkImageSubresourceRange-layerCount-01721", "%s: %s.layerCount is 0.", cmd_name, param_name); } else { const uint64_t necessary_layer_count = uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount}; if (necessary_layer_count > image_layer_count) { skip |= LogError(image, errorCodes.layer_count_err, "%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").", cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount, necessary_layer_count, image_layer_count_var_name, image_layer_count); } } } return skip; } bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type, const VkImageSubresourceRange &subresourceRange) const { bool is_khr_maintenance1 = IsExtEnabled(device_extensions.vk_khr_maintenance1); bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D && (image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR); bool is_3D_to_2D_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type; const auto image_layer_count = is_3D_to_2D_map ? image_state->createInfo.extent.depth : image_state->createInfo.arrayLayers; const auto image_layer_count_var_name = is_3D_to_2D_map ? "extent.depth" : "arrayLayers"; SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478"; subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718"; subresourceRangeErrorCodes.base_layer_err = is_khr_maintenance1 ? (is_3D_to_2D_map ? 
"VUID-VkImageViewCreateInfo-image-02724" : "VUID-VkImageViewCreateInfo-image-01482") : "VUID-VkImageViewCreateInfo-subresourceRange-01480"; subresourceRangeErrorCodes.layer_count_err = is_khr_maintenance1 ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-subresourceRange-02725" : "VUID-VkImageViewCreateInfo-subresourceRange-01483") : "VUID-VkImageViewCreateInfo-subresourceRange-01719"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange, "vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name, image_state->image, subresourceRangeErrorCodes); } bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *param_name) const { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470"; subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692"; subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472"; subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, "vkCmdClearColorImage", param_name, "arrayLayers", image_state->image, subresourceRangeErrorCodes); } bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *param_name) const { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"; subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694"; subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"; subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, "vkCmdClearDepthStencilImage", param_name, "arrayLayers", image_state->image, subresourceRangeErrorCodes); } bool CoreChecks::ValidateImageBarrierSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *cmd_name, const char *param_name) const { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486"; subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724"; subresourceRangeErrorCodes.base_layer_err = "VUID-VkImageMemoryBarrier-subresourceRange-01488"; subresourceRangeErrorCodes.layer_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01725"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, cmd_name, param_name, "arrayLayers", image_state->image, subresourceRangeErrorCodes); } bool CoreChecks::ValidateImageViewFormatFeatures(const IMAGE_STATE *image_state, const VkFormat view_format, const VkImageUsageFlags image_usage) const { // Pass in image_usage here instead of extracting it from image_state in case there's a chained VkImageViewUsageCreateInfo bool skip = false; VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM; const VkImageTiling image_tiling = 
image_state->createInfo.tiling;

    if (image_state->has_ahb_format == true) {
        // AHB image view and image share same feature sets
        tiling_features = image_state->format_features;
    } else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        // Parameter validation should catch if this is used without VK_EXT_image_drm_format_modifier
        assert(device_extensions.vk_ext_image_drm_format_modifier);
        VkImageDrmFormatModifierPropertiesEXT drm_format_properties = {VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
                                                                       nullptr};
        DispatchGetImageDrmFormatModifierPropertiesEXT(device, image_state->image, &drm_format_properties);

        VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
        VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
                                                                    nullptr};
        format_properties_2.pNext = (void *)&drm_properties_list;
        // Two-call idiom: the first query fills in drmFormatModifierCount, the second fills the array
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
        std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties(drm_properties_list.drmFormatModifierCount);
        drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);

        tiling_features = 0;
        for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
            // A DRM format modifier is an opaque 64-bit value, not a bitmask, so match on equality
            if (drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier == drm_format_properties.drmFormatModifier) {
                tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
            }
        }
    } else {
        VkFormatProperties format_properties = GetPDFormatProperties(view_format);
        tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
                                                                   : format_properties.optimalTilingFeatures;
    }

    if (tiling_features == 0) {
        skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-None-02273",
                         "vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
                         "physical device.",
                         string_VkFormat(view_format), string_VkImageTiling(image_tiling));
    } else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
        skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02274",
                         "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                         "VK_IMAGE_USAGE_SAMPLED_BIT.",
                         string_VkFormat(view_format), string_VkImageTiling(image_tiling));
    } else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
        skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02275",
                         "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                         "VK_IMAGE_USAGE_STORAGE_BIT.",
                         string_VkFormat(view_format), string_VkImageTiling(image_tiling));
    } else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
        skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02276",
                         "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                         "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
                         string_VkFormat(view_format), string_VkImageTiling(image_tiling));
    } else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
        skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02277",
                         "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                         "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                         string_VkFormat(view_format), string_VkImageTiling(image_tiling));
    } else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) { skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02652", "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", string_VkFormat(view_format), string_VkImageTiling(image_tiling)); } return skip; } bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImageView *pView) const { bool skip = false; const IMAGE_STATE *image_state = GetImageState(pCreateInfo->image); if (image_state) { skip |= ValidateImageUsageFlags(image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV | VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, false, "VUID-VkImageViewCreateInfo-image-04441", "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|" "TRANSIENT_ATTACHMENT|SHADING_RATE_IMAGE|FRAGMENT_DENSITY_MAP]_BIT"); // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020"); // Checks imported from image layer skip |= ValidateCreateImageViewSubresourceRange( image_state, pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY, pCreateInfo->subresourceRange); VkImageCreateFlags image_flags = image_state->createInfo.flags; VkFormat image_format = image_state->createInfo.format; VkImageUsageFlags image_usage = image_state->createInfo.usage; VkFormat view_format = pCreateInfo->format; VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask; VkImageType image_type = image_state->createInfo.imageType; VkImageViewType view_type = pCreateInfo->viewType; // If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(pCreateInfo->pNext); if (chained_ivuci_struct) { if (device_extensions.vk_khr_maintenance2) { if (!device_extensions.vk_ext_separate_stencil_usage) { if ((image_usage | chained_ivuci_struct->usage) != image_usage) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02661", "vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, usage must not " "include any bits that were not set in VkImageCreateInfo::usage used to create image"); } } else { const auto image_stencil_struct = lvl_find_in_chain<VkImageStencilUsageCreateInfoEXT>(image_state->createInfo.pNext); if (image_stencil_struct == nullptr) { if ((image_usage | chained_ivuci_struct->usage) != image_usage) { skip |= LogError( pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02662", "vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo and image was not created " "with a VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, usage must not include " "any bits that were not set in VkImageCreateInfo::usage used to create image"); } } else { if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) == VK_IMAGE_ASPECT_STENCIL_BIT && (image_stencil_struct->stencilUsage | 
chained_ivuci_struct->usage) != image_stencil_struct->stencilUsage) { skip |= LogError( pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02663", "vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a " "VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask " "includes VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not include any " "bits that were not set in VkImageStencilUsageCreateInfo::stencilUsage used to create image"); } if ((aspect_mask & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0 && (image_usage | chained_ivuci_struct->usage) != image_usage) { skip |= LogError( pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02664", "vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a " "VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask " "includes bits other than VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not " "include any bits that were not set in VkImageCreateInfo::usage used to create image"); } } } } image_usage = chained_ivuci_struct->usage; } // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) { if (FormatIsMultiplane(image_format)) { VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, aspect_mask); if (view_format != compat_format) { // View format must match the multiplane compatible format std::stringstream ss; ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format) << " is not compatible with plane " << GetPlaneIndex(aspect_mask) << " of underlying image format " << string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << "."; skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01586", "%s", ss.str().c_str()); } } else { if ((!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR)) || (!FormatIsMultiplane(image_format))) { // Format MUST be compatible (in the same format compatibility class) as the format the image was created with if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) { const char *error_vuid; if ((!device_extensions.vk_khr_maintenance2) && (!device_extensions.vk_khr_sampler_ycbcr_conversion)) { error_vuid = "VUID-VkImageViewCreateInfo-image-01018"; } else if ((device_extensions.vk_khr_maintenance2) && (!device_extensions.vk_khr_sampler_ycbcr_conversion)) { error_vuid = "VUID-VkImageViewCreateInfo-image-01759"; } else if ((!device_extensions.vk_khr_maintenance2) && (device_extensions.vk_khr_sampler_ycbcr_conversion)) { error_vuid = "VUID-VkImageViewCreateInfo-image-01760"; } else { // both enabled error_vuid = "VUID-VkImageViewCreateInfo-image-01761"; } std::stringstream ss; ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format) << " is not in the same format compatibility class as " << report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format) << ". 
Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
                           << "can support ImageViews with differing formats but they must be in the same compatibility class.";
                        skip |= LogError(pCreateInfo->image, error_vuid, "%s", ss.str().c_str());
                    }
                }
            }
        } else {
            // Format MUST be IDENTICAL to the format the image was created with
            // Unless it is a multi-planar color bit aspect
            if ((image_format != view_format) &&
                ((FormatIsMultiplane(image_format) == false) || (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT))) {
                const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-VkImageViewCreateInfo-image-01762"
                                                                                       : "VUID-VkImageViewCreateInfo-image-01019";
                std::stringstream ss;
                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from "
                   << report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
                   << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
                skip |= LogError(pCreateInfo->image, vuid, "%s", ss.str().c_str());
            }
        }

        // Validate correct image aspect bits for desired formats and format consistency
        skip |= ValidateImageAspectMask(image_state->image, image_format, aspect_mask, "vkCreateImageView()");

        switch (image_type) {
            case VK_IMAGE_TYPE_1D:
                if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
                    skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                     "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                     string_VkImageViewType(view_type), string_VkImageType(image_type));
                }
                break;
            case VK_IMAGE_TYPE_2D:
                if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
                    if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
                        !(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
                        skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01003",
                                         "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                         string_VkImageViewType(view_type), string_VkImageType(image_type));
                    } else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
                        skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                         "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                         string_VkImageViewType(view_type), string_VkImageType(image_type));
                    }
                }
                break;
            case VK_IMAGE_TYPE_3D:
                if (device_extensions.vk_khr_maintenance1) {
                    if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
                        if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
                            if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
                                skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01005",
                                                 "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type "
                                                 "%s.",
                                                 string_VkImageViewType(view_type), string_VkImageType(image_type));
                            } else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
                                                       VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
                                skip |= LogError(
                                    pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                    "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
                                    "when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
                                    "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
                                    string_VkImageViewType(view_type), string_VkImageType(image_type));
                            }
                        } else {
                            skip |= LogError(pCreateInfo->image,
"VUID-VkImageViewCreateInfo-subResourceRange-01021", "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.", string_VkImageViewType(view_type), string_VkImageType(image_type)); } } } else { if (view_type != VK_IMAGE_VIEW_TYPE_3D) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021", "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.", string_VkImageViewType(view_type), string_VkImageType(image_type)); } } break; default: break; } // External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateCreateImageViewANDROID(pCreateInfo); } skip |= ValidateImageViewFormatFeatures(image_state, view_format, image_usage); if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) { if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02086", "vkCreateImageView() If image was created with usage containing " "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, viewType must be " "VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY."); } if (view_format != VK_FORMAT_R8_UINT) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02087", "vkCreateImageView() If image was created with usage containing " "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT."); } } if (pCreateInfo->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) { if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE && image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer != 6) { skip |= LogError(device, "VUID-VkImageViewCreateInfo-viewType-02962", "vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be 6", image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer); } if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && ((image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer) % 6) != 0) { skip |= LogError( device, "VUID-VkImageViewCreateInfo-viewType-02963", "vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be a multiple of 6", image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer); } } if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) { if (pCreateInfo->subresourceRange.levelCount != 1) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02571", "vkCreateImageView(): If image was created with usage containing " "VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, subresourceRange.levelCount (%d) must: be 1", pCreateInfo->subresourceRange.levelCount); } } if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) { if (!enabled_features.fragment_density_map_features.fragmentDensityMapDynamic) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-02572", "vkCreateImageView(): If the fragmentDensityMapDynamic feature is not enabled, " "flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT"); } } else { if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) { if (image_flags & (VK_IMAGE_CREATE_PROTECTED_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) { skip |= LogError(pCreateInfo->image, 
"VUID-VkImageViewCreateInfo-flags-04116", "vkCreateImageView(): If image was created with usage containing " "VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT flags must not contain any of " "VK_IMAGE_CREATE_PROTECTED_BIT, VK_IMAGE_CREATE_SPARSE_BINDING_BIT, " "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT"); } } } if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT) { if (!enabled_features.fragment_density_map2_features.fragmentDensityMapDeferred) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03567", "vkCreateImageView(): If the fragmentDensityMapDeferred feature is not enabled, " "flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT"); } if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03568", "vkCreateImageView(): If flags contains VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT, " "flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT"); } } if (device_extensions.vk_ext_fragment_density_map_2) { if ((image_flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) && (image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && (pCreateInfo->subresourceRange.layerCount > phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers)) { skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-03569", "vkCreateImageView(): If image was created with flags containing " "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT and usage containing VK_IMAGE_USAGE_SAMPLED_BIT " "subresourceRange.layerCount (%d) must: be less than or equal to maxSubsampledArrayLayers (%d)", pCreateInfo->subresourceRange.layerCount, phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers); } } auto astc_decode_mode = lvl_find_in_chain<VkImageViewASTCDecodeModeEXT>(pCreateInfo->pNext); if ((device_extensions.vk_ext_astc_decode_mode) && (astc_decode_mode != nullptr)) { if ((enabled_features.astc_decode_features.decodeModeSharedExponent == VK_FALSE) && (astc_decode_mode->decodeMode == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)) { skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-decodeMode-02231", "vkCreateImageView(): decodeModeSharedExponent is not enabled but " "VkImageViewASTCDecodeModeEXT::decodeMode is VK_FORMAT_E5B9G9R9_UFLOAT_PACK32."); } } } return skip; } bool CoreChecks::ValidateCmdCopyBufferBounds(const BUFFER_STATE *src_buffer_state, const BUFFER_STATE *dst_buffer_state, uint32_t regionCount, const VkBufferCopy *pRegions) const { bool skip = false; VkDeviceSize src_buffer_size = src_buffer_state->createInfo.size; VkDeviceSize dst_buffer_size = dst_buffer_state->createInfo.size; VkDeviceSize src_min = UINT64_MAX; VkDeviceSize src_max = 0; VkDeviceSize dst_min = UINT64_MAX; VkDeviceSize dst_max = 0; for (uint32_t i = 0; i < regionCount; i++) { src_min = std::min(src_min, pRegions[i].srcOffset); src_max = std::max(src_max, (pRegions[i].srcOffset + pRegions[i].size)); dst_min = std::min(dst_min, pRegions[i].dstOffset); dst_max = std::max(dst_max, (pRegions[i].dstOffset + pRegions[i].size)); // The srcOffset member of each element of pRegions must be less than the size of srcBuffer if (pRegions[i].srcOffset >= src_buffer_size) { skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-srcOffset-00113", "vkCmdCopyBuffer(): pRegions[%d].srcOffset (%" PRIuLEAST64 ") is greater than pRegions[%d].size (%" PRIuLEAST64 ").", i, pRegions[i].srcOffset, i, 
pRegions[i].size); } // The dstOffset member of each element of pRegions must be less than the size of dstBuffer if (pRegions[i].dstOffset >= dst_buffer_size) { skip |= LogError(dst_buffer_state->buffer, "VUID-vkCmdCopyBuffer-dstOffset-00114", "vkCmdCopyBuffer(): pRegions[%d].dstOffset (%" PRIuLEAST64 ") is greater than pRegions[%d].size (%" PRIuLEAST64 ").", i, pRegions[i].dstOffset, i, pRegions[i].size); } // The size member of each element of pRegions must be less than or equal to the size of srcBuffer minus srcOffset if (pRegions[i].size > (src_buffer_size - pRegions[i].srcOffset)) { skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-size-00115", "vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64 ") is greater than the source buffer size (%" PRIuLEAST64 ") minus pRegions[%d].srcOffset (%" PRIuLEAST64 ").", i, pRegions[i].size, src_buffer_size, i, pRegions[i].srcOffset); } // The size member of each element of pRegions must be less than or equal to the size of dstBuffer minus dstOffset if (pRegions[i].size > (dst_buffer_size - pRegions[i].dstOffset)) { skip |= LogError(dst_buffer_state->buffer, "VUID-vkCmdCopyBuffer-size-00116", "vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64 ") is greater than the destination buffer size (%" PRIuLEAST64 ") minus pRegions[%d].dstOffset (%" PRIuLEAST64 ").", i, pRegions[i].size, dst_buffer_size, i, pRegions[i].dstOffset); } } // The union of the source regions, and the union of the destination regions, must not overlap in memory if (src_buffer_state->buffer == dst_buffer_state->buffer) { if (((src_min > dst_min) && (src_min < dst_max)) || ((src_max > dst_min) && (src_max < dst_max))) { skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-pRegions-00117", "vkCmdCopyBuffer(): Detected overlap between source and dest regions in memory."); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions) const { const auto cb_node = GetCBState(commandBuffer); const auto src_buffer_state = GetBufferState(srcBuffer); const auto dst_buffer_state = GetBufferState(dstBuffer); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119"); skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121"); // Validate that SRC & DST buffers have correct usage flags set skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBuffer-srcBuffer-00118", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBuffer-dstBuffer-00120", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdCopyBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()"); skip |= InsideRenderPass(cb_node, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass"); skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, regionCount, pRegions); skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01822"); skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", 
"VUID-vkCmdCopyBuffer-commandBuffer-01823"); skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01824"); return skip; } bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) const { bool skip = false; auto buffer_state = GetBufferState(buffer); if (buffer_state) { if (buffer_state->in_use.load()) { skip |= LogError(buffer, "VUID-vkDestroyBuffer-buffer-00922", "Cannot free %s that is in use by a command buffer.", report_data->FormatHandle(buffer).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) const { const IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView); const VulkanTypedHandle obj_struct(imageView, kVulkanObjectTypeImageView); bool skip = false; if (image_view_state) { skip |= ValidateObjectNotInUse(image_view_state, obj_struct, "vkDestroyImageView", "VUID-vkDestroyImageView-imageView-01026"); } return skip; } bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) const { auto buffer_state = GetBufferState(buffer); bool skip = false; if (buffer_state) { skip |= ValidateIdleBuffer(buffer); } return skip; } bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) const { auto buffer_view_state = GetBufferViewState(bufferView); const VulkanTypedHandle obj_struct(bufferView, kVulkanObjectTypeBufferView); bool skip = false; if (buffer_view_state) { skip |= ValidateObjectNotInUse(buffer_view_state, obj_struct, "vkDestroyBufferView", "VUID-vkDestroyBufferView-bufferView-00936"); } return skip; } bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) const { auto cb_node = GetCBState(commandBuffer); auto buffer_state = GetBufferState(dstBuffer); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdFillBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdFillBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdFillBuffer-dstBuffer-00029", "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= InsideRenderPass(cb_node, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-renderpass"); skip |= ValidateProtectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01811"); skip |= ValidateUnprotectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01812"); if (dstOffset >= buffer_state->createInfo.size) { skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-dstOffset-00024", "vkCmdFillBuffer(): dstOffset (0x%" PRIxLEAST64 ") is not less than destination buffer (%s) size (0x%" PRIxLEAST64 ").", dstOffset, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size); } if ((size != VK_WHOLE_SIZE) && (size > (buffer_state->createInfo.size - dstOffset))) { skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-size-00027", "vkCmdFillBuffer(): size (0x%" PRIxLEAST64 ") is greater than dstBuffer (%s) size (0x%" PRIxLEAST64 ") minus dstOffset 
(0x%" PRIxLEAST64 ").", size, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size, dstOffset); } return skip; } bool CoreChecks::ValidateBufferImageCopyData(const CMD_BUFFER_STATE *cb_node, uint32_t regionCount, const VkBufferImageCopy *pRegions, const IMAGE_STATE *image_state, const char *function) const { bool skip = false; assert(image_state != nullptr); const VkFormat image_format = image_state->createInfo.format; for (uint32_t i = 0; i < regionCount; i++) { const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask; if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) { if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-srcImage-00199", "%s(): pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 " "and 1, respectively.", function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height); } } if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) { if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-srcImage-00201", "%s(): pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these " "must be 0 and 1, respectively.", function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth); } } if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-baseArrayLayer-00213", "%s(): pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. " "For 3D images these must be 0 and 1, respectively.", function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount); } } // If the the calling command's VkImage parameter's format is not a depth/stencil format, // then bufferOffset must be a multiple of the calling command's VkImage parameter's element size uint32_t element_size = FormatElementSize(image_format, region_aspect_mask); // If not depth/stencil and not multi-plane if ((!FormatIsDepthAndStencil(image_format) && !FormatIsMultiplane(image_format)) && SafeModulo(pRegions[i].bufferOffset, element_size) != 0) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? 
"VUID-vkCmdCopyBufferToImage-bufferOffset-01558" : "VUID-vkCmdCopyBufferToImage-bufferOffset-00193"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64 " must be a multiple of this format's texel size (%" PRIu32 ").", function, i, pRegions[i].bufferOffset, element_size); } // BufferRowLength must be 0, or greater than or equal to the width member of imageExtent if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) { skip |= LogError(image_state->image, "VUID-VkBufferImageCopy-bufferRowLength-00195", "%s(): pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).", function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width); } // BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) { skip |= LogError( image_state->image, "VUID-VkBufferImageCopy-bufferImageHeight-00196", "%s(): pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).", function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height); } // Calculate adjusted image extent, accounting for multiplane image factors VkExtent3D adjusted_image_extent = GetImageSubresourceExtent(image_state, &pRegions[i].imageSubresource); // imageOffset.x and (imageExtent.width + imageOffset.x) must both be >= 0 and <= image subresource width if ((pRegions[i].imageOffset.x < 0) || (pRegions[i].imageOffset.x > static_cast<int32_t>(adjusted_image_extent.width)) || ((pRegions[i].imageOffset.x + static_cast<int32_t>(pRegions[i].imageExtent.width)) > static_cast<int32_t>(adjusted_image_extent.width))) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00197", "%s(): Both pRegion[%d] imageoffset.x (%d) and (imageExtent.width + imageOffset.x) (%d) must be >= " "zero or <= image subresource width (%d).", function, i, pRegions[i].imageOffset.x, (pRegions[i].imageOffset.x + pRegions[i].imageExtent.width), adjusted_image_extent.width); } // imageOffset.y and (imageExtent.height + imageOffset.y) must both be >= 0 and <= image subresource height if ((pRegions[i].imageOffset.y < 0) || (pRegions[i].imageOffset.y > static_cast<int32_t>(adjusted_image_extent.height)) || ((pRegions[i].imageOffset.y + static_cast<int32_t>(pRegions[i].imageExtent.height)) > static_cast<int32_t>(adjusted_image_extent.height))) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00198", "%s(): Both pRegion[%d] imageoffset.y (%d) and (imageExtent.height + imageOffset.y) (%d) must be >= " "zero or <= image subresource height (%d).", function, i, pRegions[i].imageOffset.y, (pRegions[i].imageOffset.y + pRegions[i].imageExtent.height), adjusted_image_extent.height); } // imageOffset.z and (imageExtent.depth + imageOffset.z) must both be >= 0 and <= image subresource depth if ((pRegions[i].imageOffset.z < 0) || (pRegions[i].imageOffset.z > static_cast<int32_t>(adjusted_image_extent.depth)) || ((pRegions[i].imageOffset.z + static_cast<int32_t>(pRegions[i].imageExtent.depth)) > static_cast<int32_t>(adjusted_image_extent.depth))) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00200", "%s(): Both pRegion[%d] imageoffset.z (%d) and (imageExtent.depth + imageOffset.z) (%d) must be >= " "zero or <= image subresource depth (%d).", function, i, pRegions[i].imageOffset.z, 
(pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth), adjusted_image_extent.depth); } // subresource aspectMask must have exactly 1 bit set const int num_bits = sizeof(VkFlags) * CHAR_BIT; std::bitset<num_bits> aspect_mask_bits(region_aspect_mask); if (aspect_mask_bits.count() != 1) { skip |= LogError(image_state->image, "VUID-VkBufferImageCopy-aspectMask-00212", "%s(): aspectMasks for imageSubresource in pRegion[%d] must have only a single bit set.", function, i); } // image subresource aspect bit must match format if (!VerifyAspectsPresent(region_aspect_mask, image_format)) { skip |= LogError( image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-00211", "%s(): pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.", function, i, region_aspect_mask, image_format); } // Checks that apply only to compressed images if (FormatIsCompressed(image_format) || FormatIsSinglePlane_422(image_format)) { auto block_size = FormatTexelBlockExtent(image_format); // BufferRowLength must be a multiple of block width if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyBufferToImage-bufferRowLength-00203" : "VUID-vkCmdCopyBufferToImage-bufferRowLength-00203"; skip |= LogError( image_state->image, vuid, "%s(): pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d)..", function, i, pRegions[i].bufferRowLength, block_size.width); } // BufferRowHeight must be a multiple of block height if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyBufferToImage-bufferImageHeight-00204" : "VUID-vkCmdCopyBufferToImage-bufferImageHeight-00204"; skip |= LogError( image_state->image, vuid, "%s(): pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d)..", function, i, pRegions[i].bufferImageHeight, block_size.height); } // image offsets must be multiples of block dimensions if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) || (SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) || (SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyBufferToImage-imageOffset-00205" : "VUID-vkCmdCopyBufferToImage-imageOffset-00205"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel " "width & height (%d, %d)..", function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width, block_size.height); } // bufferOffset must be a multiple of block size (linear bytes) uint32_t block_size_in_bytes = FormatElementSize(image_format); if (SafeModulo(pRegions[i].bufferOffset, block_size_in_bytes) != 0) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? 
"VUID-vkCmdCopyBufferToImage-bufferOffset-00206" : "VUID-vkCmdCopyBufferToImage-bufferOffset-00206"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] bufferOffset (0x%" PRIxLEAST64 ") must be a multiple of the compressed image's texel block size (%" PRIu32 ")..", function, i, pRegions[i].bufferOffset, block_size_in_bytes); } // imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource)); if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) && (pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyBufferToImage-imageExtent-00207" : "VUID-vkCmdCopyBufferToImage-imageExtent-00207"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width " "(%d), or when added to offset.x (%d) must equal the image subresource width (%d)..", function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x, mip_extent.width); } // imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) && (pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyBufferToImage-imageExtent-00208" : "VUID-vkCmdCopyBufferToImage-imageExtent-00208"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height " "(%d), or when added to offset.y (%d) must equal the image subresource height (%d)..", function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y, mip_extent.height); } // imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) && (pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) { const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? 
"VUID-vkCmdCopyBufferToImage-imageExtent-00209" : "VUID-vkCmdCopyBufferToImage-imageExtent-00209"; skip |= LogError(image_state->image, vuid, "%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth " "(%d), or when added to offset.z (%d) must equal the image subresource depth (%d)..", function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z, mip_extent.depth); } } // Checks that apply only to multi-planar format images if (FormatIsMultiplane(image_format)) { // VK_IMAGE_ASPECT_PLANE_2_BIT valid only for image formats with three planes if ((FormatPlaneCount(image_format) < 3) && (region_aspect_mask == VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-01560", "%s(): pRegion[%d] subresource aspectMask cannot be VK_IMAGE_ASPECT_PLANE_2_BIT unless image " "format has three planes.", function, i); } // image subresource aspectMask must be VK_IMAGE_ASPECT_PLANE_*_BIT if (0 == (region_aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) { skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-01560", "%s(): pRegion[%d] subresource aspectMask for multi-plane image formats must have a " "VK_IMAGE_ASPECT_PLANE_*_BIT when copying to or from.", function, i); } else { // Know aspect mask is valid const VkFormat compatible_format = FindMultiplaneCompatibleFormat(image_format, region_aspect_mask); const uint32_t compatible_size = FormatElementSize(compatible_format); if (SafeModulo(pRegions[i].bufferOffset, compatible_size) != 0) { skip |= LogError( image_state->image, "VUID-vkCmdCopyBufferToImage-bufferOffset-01559", "%s(): pRegion[%d]->bufferOffset is 0x%" PRIxLEAST64 " but must be a multiple of the multi-plane compatible format's texel size (%u) for plane %u (%s).", function, i, pRegions[i].bufferOffset, element_size, GetPlaneIndex(region_aspect_mask), string_VkFormat(compatible_format)); } } } // Checks depth or stencil aspect are used in graphics queue if ((region_aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) { assert(cb_node != nullptr); const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get(); if (command_pool != nullptr) { const uint32_t queueFamilyIndex = command_pool->queueFamilyIndex; const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queueFamilyIndex].queueFlags; if ((queue_flags & VK_QUEUE_GRAPHICS_BIT) == 0) { LogObjectList objlist(cb_node->commandBuffer); objlist.add(command_pool->commandPool); // TODO - Label when future headers get merged in from internral MR 4077 fix skip |= LogError(image_state->image, "UNASSIGNED-VkBufferImageCopy-aspectMask", "%s(): pRegion[%d] subresource aspectMask 0x%x specifies VK_IMAGE_ASPECT_DEPTH_BIT or " "VK_IMAGE_ASPECT_STENCIL_BIT but the command buffer %s was allocated from the command pool %s " "which was create with queueFamilyIndex %u which doesn't contain the VK_QUEUE_GRAPHICS_BIT flag.", function, i, region_aspect_mask, report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(command_pool->commandPool).c_str(), queueFamilyIndex); } } } } return skip; } bool CoreChecks::ValidateImageBounds(const IMAGE_STATE *image_state, const uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) const { bool skip = false; const VkImageCreateInfo *image_info = &(image_state->createInfo); for (uint32_t i = 0; i < 
regionCount; i++) { VkExtent3D extent = pRegions[i].imageExtent; VkOffset3D offset = pRegions[i].imageOffset; if (IsExtentSizeZero(&extent)) // Warn on zero area subresource { skip |= LogWarning(image_state->image, kVUID_Core_Image_ZeroAreaSubregion, "%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area", func_name, i, extent.width, extent.height, extent.depth); } VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource)); // If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1) if (FormatIsCompressed(image_info->format) || FormatIsSinglePlane_422(image_state->createInfo.format)) { auto block_extent = FormatTexelBlockExtent(image_info->format); if (image_extent.width % block_extent.width) { image_extent.width += (block_extent.width - (image_extent.width % block_extent.width)); } if (image_extent.height % block_extent.height) { image_extent.height += (block_extent.height - (image_extent.height % block_extent.height)); } if (image_extent.depth % block_extent.depth) { image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth)); } } if (0 != ExceedsBounds(&offset, &extent, &image_extent)) { skip |= LogError(image_state->image, msg_code, "%s: pRegion[%d] exceeds image bounds..", func_name, i); } } return skip; } bool CoreChecks::ValidateBufferBounds(const IMAGE_STATE *image_state, const BUFFER_STATE *buff_state, uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) const { bool skip = false; VkDeviceSize buffer_size = buff_state->createInfo.size; for (uint32_t i = 0; i < regionCount; i++) { VkDeviceSize max_buffer_offset = GetBufferSizeFromCopyImage(pRegions[i], image_state->createInfo.format) + pRegions[i].bufferOffset; if (buffer_size < max_buffer_offset) { skip |= LogError(device, msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes..", func_name, i, buffer_size); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) const { const auto cb_node = GetCBState(commandBuffer); const auto src_image_state = GetImageState(srcImage); const auto dst_buffer_state = GetBufferState(dstBuffer); bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer"); // Validate command buffer state skip |= ValidateCmd(cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()"); // Command pool must support graphics, compute, or transfer operations const auto pPool = cb_node->command_pool.get(); VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags; if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) { skip |= LogError(cb_node->createInfo.commandPool, "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool", "Cannot call vkCmdCopyImageToBuffer() on a command buffer allocated from a pool without graphics, compute, " "or transfer capabilities.."); } skip |= ValidateImageBounds(src_image_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); skip |= ValidateBufferBounds(src_image_state, dst_buffer_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); skip |= ValidateImageSampleCount(src_image_state, 
VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage", "VUID-vkCmdCopyImageToBuffer-srcImage-00188"); skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-00187"); skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-dstBuffer-00192"); // Validate that SRC image & DST buffer have correct usage flags set skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImageToBuffer-srcImage-00186", "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImageToBuffer-dstBuffer-00191", "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-commandBuffer-01831"); skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-commandBuffer-01832"); skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-commandBuffer-01833"); // Validation for VK_EXT_fragment_density_map if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) { skip |= LogError(cb_node->commandBuffer, "vkCmdCopyImageToBuffer-srcImage-02544", "vkCmdCopyBufferToImage(): srcImage must not have been created with flags containing " "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT"); } if (device_extensions.vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-01998"); } skip |= InsideRenderPass(cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass"); bool hit_error = false; const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ? 
"VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397" : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyImageToBuffer()", "imageSubresource", i); skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImageToBuffer()", src_invalid_layout_vuid, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189", &hit_error); skip |= ValidateCopyBufferImageTransferGranularityRequirements( cb_node, src_image_state, &pRegions[i], i, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyImageToBuffer()", "imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyImageToBuffer()", "imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704"); } return skip; } void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) { StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); auto cb_node = GetCBState(commandBuffer); auto src_image_state = GetImageState(srcImage); // Make sure that all image slices record referenced layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].imageSubresource, srcImageLayout); } } bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) const { const auto cb_node = GetCBState(commandBuffer); const auto src_buffer_state = GetBufferState(srcBuffer); const auto dst_image_state = GetImageState(dstImage); bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage"); // Validate command buffer state skip |= ValidateCmd(cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()"); // Command pool must support graphics, compute, or transfer operations const auto pPool = cb_node->command_pool.get(); VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags; if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) { skip |= LogError(cb_node->createInfo.commandPool, "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool", "Cannot call vkCmdCopyBufferToImage() on a command buffer allocated from a pool without graphics, compute, " "or transfer capabilities.."); } skip |= ValidateImageBounds(dst_image_state, regionCount, pRegions, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-pRegions-00172"); skip |= ValidateBufferBounds(dst_image_state, src_buffer_state, regionCount, pRegions, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-pRegions-00171"); skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage", "VUID-vkCmdCopyBufferToImage-dstImage-00179"); skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBufferToImage()", 
"VUID-vkCmdCopyBufferToImage-srcBuffer-00176"); skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-00178"); skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174", "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBufferToImage-dstImage-00177", "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-commandBuffer-01828"); skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-commandBuffer-01829"); skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-commandBuffer-01830"); // Validation for VK_EXT_fragment_density_map if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) { skip |= LogError(cb_node->commandBuffer, "vkCmdCopyBufferToImage-dstImage-02543", "vkCmdCopyBufferToImage(): dstImage must not have been created with flags containing " "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT"); } if (device_extensions.vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-01997"); } skip |= InsideRenderPass(cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass"); bool hit_error = false; const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image) ? 
"VUID-vkCmdCopyBufferToImage-dstImageLayout-01396" : "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyBufferToImage()", "imageSubresource", i); skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyBufferToImage()", dst_invalid_layout_vuid, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180", &hit_error); skip |= ValidateCopyBufferImageTransferGranularityRequirements( cb_node, dst_image_state, &pRegions[i], i, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyBufferToImage()", "imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01701"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyBufferToImage()", "imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01702"); } return skip; } void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) { StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); auto cb_node = GetCBState(commandBuffer); auto dst_image_state = GetImageState(dstImage); // Make sure that all image slices are record referenced layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].imageSubresource, dstImageLayout); } } bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) const { bool skip = false; const VkImageAspectFlags sub_aspect = pSubresource->aspectMask; // The aspectMask member of pSubresource must only have a single bit set const int num_bits = sizeof(sub_aspect) * CHAR_BIT; std::bitset<num_bits> aspect_mask_bits(sub_aspect); if (aspect_mask_bits.count() != 1) { skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-aspectMask-00997", "vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set."); } const IMAGE_STATE *image_entry = GetImageState(image); if (!image_entry) { return skip; } // Image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR if (device_extensions.vk_ext_image_drm_format_modifier) { if ((image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) && (image_entry->createInfo.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)) { skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-02270", "vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR or " "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT."); } } else { if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) { skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-00996", "vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR."); } } // mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) { skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-mipLevel-01716", "vkGetImageSubresourceLayout(): 
pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel, image_entry->createInfo.mipLevels); } // arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) { skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717", "vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.", pSubresource->arrayLayer, image_entry->createInfo.arrayLayers); } // subresource's aspect must be compatible with image's format. const VkFormat img_format = image_entry->createInfo.format; if (image_entry->createInfo.tiling == VK_IMAGE_TILING_LINEAR) { if (FormatIsMultiplane(img_format)) { VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version if (FormatPlaneCount(img_format) > 2u) { allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version } if (sub_aspect != (sub_aspect & allowed_flags)) { skip |= LogError(image, vuid, "vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32 ") must be a single-plane specifier flag.", sub_aspect); } } else if (FormatIsColor(img_format)) { if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) { skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect, "vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be " "VK_IMAGE_ASPECT_COLOR."); } } else if (FormatIsDepthOrStencil(img_format)) { if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) { } } } else if (image_entry->createInfo.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) { if ((sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) { // TODO: This VU also needs to ensure that the DRM index is in range and valid. 
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-tiling-02271", "vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must be " "VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT."); } } if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageSubresourceLayoutANDROID(image); } return skip; } // Validates the image is allowed to be protected bool CoreChecks::ValidateProtectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name, const char *vuid) const { bool skip = false; if ((cb_state->unprotected == true) && (image_state->unprotected == false)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(image_state->image); skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while image %s is a protected image", cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(image_state->image).c_str()); } return skip; } // Validates the image is allowed to be unprotected bool CoreChecks::ValidateUnprotectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name, const char *vuid) const { bool skip = false; if ((cb_state->unprotected == false) && (image_state->unprotected == true)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(image_state->image); skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while image %s is an unprotected image", cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(image_state->image).c_str()); } return skip; } // Validates the buffer is allowed to be protected bool CoreChecks::ValidateProtectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name, const char *vuid) const { bool skip = false; if ((cb_state->unprotected == true) && (buffer_state->unprotected == false)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(buffer_state->buffer); skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while buffer %s is a protected buffer", cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(buffer_state->buffer).c_str()); } return skip; } // Validates the buffer is allowed to be unprotected bool CoreChecks::ValidateUnprotectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name, const char *vuid) const { bool skip = false; if ((cb_state->unprotected == false) && (buffer_state->unprotected == true)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(buffer_state->buffer); skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while buffer %s is an unprotected buffer", cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(buffer_state->buffer).c_str()); } return skip; }
1
14,377
`!FormatIsMultiplane(image_format)` will always be true at this point due to the test on line 4639.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -24,6 +24,7 @@ from typing import Any, Optional, List, Union import numpy as np import pandas as pd +from pandas.core.accessor import CachedAccessor from pyspark import sql as spark from pyspark.sql import functions as F
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark Column to behave similar to pandas Series. """ import re import inspect from functools import partial, wraps from typing import Any, Optional, List, Union import numpy as np import pandas as pd from pyspark import sql as spark from pyspark.sql import functions as F from pyspark.sql.types import BooleanType, StructType from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.base import IndexOpsMixin from databricks.koalas.frame import DataFrame from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.series import _MissingPandasLikeSeries from databricks.koalas.utils import validate_arguments_and_invoke_function # This regular expression pattern is complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ in Series. # This pattern basically seeks the footer string from Pandas' REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)") _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. Parameters ---------- other : Series or scalar value fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result will be missing. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series The result of the operation. See Also -------- Series.{reverse} {series_examples} """ _add_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [1, 1, 1, np.nan], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 1.0 1.0 b 1.0 NaN c 1.0 1.0 d NaN NaN >>> df.a.add(df.b) a 2.0 b NaN c 2.0 d NaN Name: a, dtype: float64 """ _sub_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [1, 1, 1, np.nan], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 1.0 1.0 b 1.0 NaN c 1.0 1.0 d NaN NaN >>> df.a.subtract(df.b) a 0.0 b NaN c 0.0 d NaN Name: a, dtype: float64 """ _mul_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.multiply(df.b) a 4.0 b NaN c 8.0 d NaN Name: a, dtype: float64 """ _div_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.divide(df.b) a 1.0 b NaN c 2.0 d NaN Name: a, dtype: float64 """ class Series(_Frame, IndexOpsMixin): """ Koala Series that corresponds to Pandas Series logically. This holds Spark Column internally. :ivar _scol: Spark Column instance :type _scol: pyspark.Column :ivar _kdf: Parent's Koalas DataFrame :type _kdf: ks.DataFrame :ivar _index_map: Each pair holds the index field name which exists in Spark fields, and the index name. Parameters ---------- data : array-like, dict, or scalar value, Pandas Series or Spark Column Contains data stored in Series If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a Pandas Series, other arguments should not be used. If `data` is a Spark Column, all other arguments except `index` should not be used. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index sequence are used, the index will override the keys found in the dict. If `data` is a Spark DataFrame, `index` is expected to be `Metadata`s `index_map`. dtype : numpy.dtype or None If None, dtype will be inferred copy : boolean, default False Copy input data """ def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False, anchor=None): if isinstance(data, pd.Series): assert index is None assert dtype is None assert name is None assert not copy assert anchor is None assert not fastpath self._init_from_pandas(data) elif isinstance(data, spark.Column): assert dtype is None assert name is None assert not copy assert not fastpath self._init_from_spark(data, anchor, index) else: s = pd.Series( data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath) self._init_from_pandas(s) def _init_from_pandas(self, s): """ Creates Koalas Series from Pandas Series. :param s: Pandas Series """ kdf = DataFrame(pd.DataFrame(s)) self._init_from_spark(kdf._sdf[kdf._metadata.data_columns[0]], kdf, kdf._metadata.index_map) def _init_from_spark(self, scol, kdf, index_map): """ Creates Koalas Series from Spark Column. :param scol: Spark Column :param kdf: Koalas DataFrame that should have the `scol`. :param index_map: index information of this Series. """ assert index_map is not None assert kdf is not None assert isinstance(kdf, ks.DataFrame), type(kdf) self._scol = scol self._kdf = kdf self._index_map = index_map def _with_new_scol(self, scol: spark.Column) -> 'Series': """ Copy Koalas Series with the new Spark Column. :param scol: the new Spark Column :return: the copied Series """ return Series(scol, anchor=self._kdf, index=self._index_map) @property def dt(self): from databricks.koalas.datetimes import DatetimeMethods return DatetimeMethods(self) @property def dtypes(self): """Return the dtype object of the underlying data. 
>>> s = ks.Series(list('abc')) >>> s.dtype == s.dtypes True """ return self.dtype @property def spark_type(self): """ Returns the data type as defined by Spark, as a Spark DataType object.""" return self.schema.fields[-1].dataType # Arithmetic Operators def add(self, other): return (self + other).rename(self.name) add.__doc__ = _flex_doc_SERIES.format( desc='Addition', op_name="+", equiv="series + other", reverse='radd', series_examples=_add_example_SERIES) def radd(self, other): return (other + self).rename(self.name) radd.__doc__ = _flex_doc_SERIES.format( desc='Addition', op_name="+", equiv="other + series", reverse='add', series_examples=_add_example_SERIES) def div(self, other): return (self / other).rename(self.name) div.__doc__ = _flex_doc_SERIES.format( desc='Floating division', op_name="/", equiv="series / other", reverse='rdiv', series_examples=_div_example_SERIES) divide = div def rdiv(self, other): return (other / self).rename(self.name) rdiv.__doc__ = _flex_doc_SERIES.format( desc='Floating division', op_name="/", equiv="other / series", reverse='div', series_examples=_div_example_SERIES) def truediv(self, other): return (self / other).rename(self.name) truediv.__doc__ = _flex_doc_SERIES.format( desc='Floating division', op_name="/", equiv="series / other", reverse='rtruediv', series_examples=_div_example_SERIES) def rtruediv(self, other): return (other / self).rename(self.name) rtruediv.__doc__ = _flex_doc_SERIES.format( desc='Floating division', op_name="/", equiv="other / series", reverse='truediv', series_examples=_div_example_SERIES) def mul(self, other): return (self * other).rename(self.name) mul.__doc__ = _flex_doc_SERIES.format( desc='Multiplication', op_name="*", equiv="series * other", reverse='rmul', series_examples=_mul_example_SERIES) multiply = mul def rmul(self, other): return (other * self).rename(self.name) rmul.__doc__ = _flex_doc_SERIES.format( desc='Multiplication', op_name="*", equiv="other * series", reverse='mul', series_examples=_mul_example_SERIES) def sub(self, other): return (self - other).rename(self.name) sub.__doc__ = _flex_doc_SERIES.format( desc='Subtraction', op_name="-", equiv="series - other", reverse='rsub', series_examples=_sub_example_SERIES) subtract = sub def rsub(self, other): return (other - self).rename(self.name) rsub.__doc__ = _flex_doc_SERIES.format( desc='Subtraction', op_name="-", equiv="other - series", reverse='sub', series_examples=_sub_example_SERIES) # TODO: arg should support Series # TODO: NaN and None def map(self, arg): """ Map values of Series according to input correspondence. Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict``. .. note:: make sure the size of the dictionary is not huge because it could downgrade the performance or throw OutOfMemoryError due to a huge expression within Spark. Consider the input as a functions as an alternative instead in this case. Parameters ---------- arg : function or dict Mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``None``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``None``. 
Examples -------- >>> s = ks.Series(['cat', 'dog', None, 'rabbit']) >>> s 0 cat 1 dog 2 None 3 rabbit Name: 0, dtype: object ``map`` accepts a ``dict``. Values that are not found in the ``dict`` are converted to ``None``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 None 3 None Name: 0, dtype: object It also accepts a function: >>> def format(x) -> str: ... return 'I am a {}'.format(x) >>> s.map(format) 0 I am a cat 1 I am a dog 2 I am a None 3 I am a rabbit Name: 0, dtype: object """ if isinstance(arg, dict): is_start = True # In case dictionary is empty. current = F.when(F.lit(False), F.lit(None).cast(self.spark_type)) for to_replace, value in arg.items(): if is_start: current = F.when(self._scol == F.lit(to_replace), value) is_start = False else: current = current.when(self._scol == F.lit(to_replace), value) if hasattr(arg, "__missing__"): tmp_val = arg[np._NoValue] del arg[np._NoValue] # Remove in case it's set in defaultdict. current = current.otherwise(F.lit(tmp_val)) else: current = current.otherwise(F.lit(None).cast(self.spark_type)) return Series(current, anchor=self._kdf, index=self._index_map).rename(self.name) else: return self.apply(arg) def astype(self, dtype) -> 'Series': """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> ser = ks.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 Name: 0, dtype: int32 >>> ser.astype('int64') 0 1 1 2 Name: 0, dtype: int64 """ from databricks.koalas.typedef import as_spark_type spark_type = as_spark_type(dtype) if not spark_type: raise ValueError("Type {} not understood".format(dtype)) return Series(self._scol.cast(spark_type), anchor=self._kdf, index=self._index_map) def getField(self, name): if not isinstance(self.schema, StructType): raise AttributeError("Not a struct: {}".format(self.schema)) else: fnames = self.schema.fieldNames() if name not in fnames: raise AttributeError( "Field {} not found, possible values are {}".format(name, ", ".join(fnames))) return Series(self._scol.getField(name), anchor=self._kdf, index=self._index_map) def alias(self, name): """An alias for :meth:`Series.rename`.""" return self.rename(name) @property def schema(self) -> StructType: """Return the underlying Spark DataFrame's schema.""" return self.to_dataframe()._sdf.schema @property def shape(self): """Return a tuple of the shape of the underlying data.""" return len(self), @property def name(self) -> str: """Return name of the Series.""" return self._metadata.data_columns[0] @name.setter def name(self, name): self.rename(name, inplace=True) # TODO: Functionality and documentation should be matched. Currently, changing index labels # taking dictionary and function to change index are not supported. def rename(self, index=None, **kwargs): """ Alter Series name. Parameters ---------- index : scalar Scalar will alter the ``Series.name`` attribute. inplace : bool, default False Whether to return a new Series. If True then value of copy is ignored. Returns ------- Series Series with name altered. 
Examples -------- >>> s = ks.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 Name: 0, dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 """ if index is None: return self scol = self._scol.alias(index) if kwargs.get('inplace', False): self._scol = scol return self else: return Series(scol, anchor=self._kdf, index=self._index_map) @property def _metadata(self): return self.to_dataframe()._metadata @property def index(self): """The index (axis labels) Column of the Series. Currently not supported when the DataFrame has no index. See Also -------- Index """ return self._kdf.index @property def is_unique(self): """ Return boolean if values in the object are unique Returns ------- is_unique : boolean >>> ks.Series([1, 2, 3]).is_unique True >>> ks.Series([1, 2, 2]).is_unique False >>> ks.Series([1, 2, 3, None]).is_unique True """ sdf = self._kdf._sdf.select(self._scol) col = self._scol # Here we check: # 1. the distinct count without nulls and count without nulls for non-null values # 2. count null values and see if null is a distinct value. # # This workaround is in order to calculate the distinct count including nulls in # single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls. return sdf.select( (F.count(col) == F.countDistinct(col)) & (F.count(F.when(col.isNull(), 1).otherwise(None)) <= 1) ).collect()[0][0] def reset_index(self, level=None, drop=False, name=None, inplace=False): """ Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses self.name by default. This argument is ignored when drop is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. Examples -------- >>> s = ks.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. 
>>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 """ if inplace and not drop: raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame') if name is not None: kdf = self.rename(name).to_dataframe() else: kdf = self.to_dataframe() kdf = kdf.reset_index(level=level, drop=drop) if drop: s = _col(kdf) if inplace: self._kdf = kdf self._scol = s._scol self._index_map = s._index_map else: return s else: return kdf def to_dataframe(self) -> spark.DataFrame: sdf = self._kdf._sdf.select([field for field, _ in self._index_map] + [self._scol]) metadata = Metadata(data_columns=[sdf.schema[-1].name], index_map=self._index_map) return DataFrame(sdf, metadata) def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional buffer to write to na_rep : string, optional string representation of NAN to use, default 'NaN' float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats default None header : boolean, default True Add the Series header (index name) index : bool, optional Add index (row) labels, default True length : boolean, default False Add the Series length dtype : boolean, default False Add the Series dtype name : boolean, default False Add the Series name if not None max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- formatted : string (if not buffer passed) Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats']) >>> print(df['dogs'].to_string()) 0 0.2 1 0.0 2 0.6 3 0.2 >>> print(df['dogs'].to_string(max_rows=2)) 0 0.2 1 0.0 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kseries = self.head(max_rows) else: kseries = self return validate_arguments_and_invoke_function( kseries.to_pandas(), self.to_string, pd.Series.to_string, args) def to_clipboard(self, excel=True, sep=None, **kwargs): # Docstring defined below by reusing DataFrame.to_clipboard's. args = locals() kseries = self return validate_arguments_and_invoke_function( kseries.to_pandas(), self.to_clipboard, pd.Series.to_clipboard, args) to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__ def to_dict(self, into=dict): """ Convert Series to {label -> value} dict or dict-like object. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. 
Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s_dict = s.to_dict() >>> sorted(s_dict.items()) [(0, 1), (1, 2), (2, 3), (3, 4)] >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) # doctest: +ELLIPSIS defaultdict(<class 'list'>, {...}) """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kseries = self return validate_arguments_and_invoke_function( kseries.to_pandas(), self.to_dict, pd.Series.to_dict, args) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): args = locals() kseries = self return validate_arguments_and_invoke_function( kseries.to_pandas(), self.to_latex, pd.Series.to_latex, args) to_latex.__doc__ = DataFrame.to_latex.__doc__ def to_pandas(self): """ Return a pandas Series. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats']) >>> df['dogs'].to_pandas() 0 0.2 1 0.0 2 0.6 3 0.2 Name: dogs, dtype: float64 """ return _col(self.to_dataframe().toPandas()) # Alias to maintain backward compatibility with Spark toPandas = to_pandas def to_list(self): """ Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) .. note:: This method should only be used if the resulting list is expected to be small, as all the data is loaded into the driver's memory. """ return self.to_pandas().to_list() tolist = to_list def fillna(self, value=None, axis=None, inplace=False): """Fill NA/NaN values. Parameters ---------- value : scalar Value to use to fill holes. axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- Series Series with NA entries filled. Examples -------- >>> s = ks.Series([np.nan, 2, 3, 4, np.nan, 6], name='x') >>> s 0 NaN 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 Name: x, dtype: float64 Replace all NaN elements with 0s. >>> s.fillna(0) 0 0.0 1 2.0 2 3.0 3 4.0 4 0.0 5 6.0 Name: x, dtype: float64 """ ks = _col(self.to_dataframe().fillna(value=value, axis=axis, inplace=False)) if inplace: self._kdf = ks._kdf self._scol = ks._scol else: return ks def dropna(self, axis=0, inplace=False, **kwargs): """ Return a new Series with missing values removed. Parameters ---------- axis : {0 or 'index'}, default 0 There is only one axis to drop values from. inplace : bool, default False If True, do operation inplace and return None. **kwargs Not in use. Returns ------- Series Series with NA entries dropped from it. Examples -------- >>> ser = ks.Series([1., 2., np.nan]) >>> ser 0 1.0 1 2.0 2 NaN Name: 0, dtype: float64 Drop NA values from a Series. >>> ser.dropna() 0 1.0 1 2.0 Name: 0, dtype: float64 Keep the Series with valid entries in the same variable. >>> ser.dropna(inplace=True) >>> ser 0 1.0 1 2.0 Name: 0, dtype: float64 """ # TODO: last two examples from Pandas produce different results. 
kser = _col(self.to_dataframe().dropna(axis=axis, inplace=False)) if inplace: self._kdf = kser._kdf self._scol = kser._scol else: return kser def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> 'Series': """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- Series Series with the values outside the clip boundaries replaced Examples -------- >>> ks.Series([0, 2, 4]).clip(1, 3) 0 1 1 2 2 3 Name: 0, dtype: int64 Notes ----- One difference between this implementation and pandas is that running pd.Series(['a', 'b']).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ks.Series(['a', 'b']).clip(0, 1) will output the original Series, simply ignoring the incompatible types. """ return _col(self.to_dataframe().clip(lower, upper)) def head(self, n=5): """ Return the first n rows. This function returns the first n rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : Integer, default = 5 Returns ------- The first n rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']}) >>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE 0 alligator 1 bee Name: animal, dtype: object """ return _col(self.to_dataframe().head(n)) # TODO: Categorical type isn't supported (due to PySpark's limitation) and # some doctests related with timestamps were not added. def unique(self): """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. .. note:: This method returns newly creased Series whereas Pandas returns the unique values as a NumPy array. Returns ------- Returns the unique values as a Series. See Examples section. Examples -------- >>> ks.Series([2, 1, 3, 3], name='A').unique() 0 1 1 3 2 2 Name: A, dtype: int64 >>> ks.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() 0 2016-01-01 Name: 0, dtype: datetime64[ns] """ sdf = self.to_dataframe()._sdf return _col(DataFrame(sdf.select(self._scol).distinct())) # TODO: Update Documentation for Bins Parameter when its supported def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): """ Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : boolean, default False If True then the object returned will contain the relative frequencies of the unique values. sort : boolean, default True Sort by values. ascending : boolean, default False Sort in ascending order. bins : Not Yet Supported dropna : boolean, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.count: Number of non-NA elements in a Series. Examples -------- >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]}) >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE 1.0 3 0.0 2 Name: x, dtype: int64 With `normalize` set to `True`, returns the relative frequency by dividing all values by the sum of values. 
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE 1.0 0.6 0.0 0.4 Name: x, dtype: float64 **dropna** With `dropna` set to `False` we can also see NaN index values. >>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE 1.0 3 0.0 2 NaN 1 Name: x, dtype: int64 """ if bins is not None: raise NotImplementedError("value_counts currently does not support bins") if dropna: sdf_dropna = self._kdf._sdf.filter(self.notna()._scol) else: sdf_dropna = self._kdf._sdf sdf = sdf_dropna.groupby(self._scol).count() if sort: if ascending: sdf = sdf.orderBy(F.col('count')) else: sdf = sdf.orderBy(F.col('count').desc()) if normalize: sum = sdf_dropna.count() sdf = sdf.withColumn('count', F.col('count') / F.lit(sum)) index_name = 'index' if self.name != 'index' else 'level_0' kdf = DataFrame(sdf) kdf.columns = [index_name, self.name] kdf._metadata = Metadata(data_columns=[self.name], index_map=[(index_name, None)]) return _col(kdf) def sort_values(self, ascending: bool = True, inplace: bool = False, na_position: str = 'last') -> Union['Series', None]: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : Series ordered by values. Examples -------- >>> s = ks.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 Name: 0, dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN Name: 0, dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN Name: 0, dtype: float64 Sort values inplace >>> s.sort_values(ascending=False, inplace=True) >>> s 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN Name: 0, dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 Name: 0, dtype: float64 Sort a series of strings >>> s = ks.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c Name: 0, dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z Name: 0, dtype: object """ ks_ = _col(self.to_dataframe().sort_values(by=self.name, ascending=ascending, na_position=na_position)) if inplace: self._kdf = ks_.to_dataframe() self._scol = ks_._scol self._index_map = ks_._index_map return None else: return ks_ def sort_index(self, axis: int = 0, level: int = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = 'last') \ -> Optional['Series']: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None Koalas does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. 
Returns ------- sorted_obj : Series Examples -------- >>> df = ks.Series([2, 1, np.nan], index=['b', 'a', np.nan]) >>> df.sort_index() a 1.0 b 2.0 NaN NaN Name: 0, dtype: float64 >>> df.sort_index(ascending=False) b 2.0 a 1.0 NaN NaN Name: 0, dtype: float64 >>> df.sort_index(na_position='first') NaN NaN a 1.0 b 2.0 Name: 0, dtype: float64 >>> df.sort_index(inplace=True) >>> df a 1.0 b 2.0 NaN NaN Name: 0, dtype: float64 >>> ks.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0').sort_index() a 0 3 1 2 b 0 1 1 0 Name: 0, dtype: int64 """ if axis != 0: raise ValueError("No other axes than 0 are supported at the moment") if level is not None: raise ValueError("The 'axis' argument is not supported at the moment") if kind is not None: raise ValueError("Specifying the sorting algorithm is supported at the moment.") ks_ = _col(self.to_dataframe().sort_values(by=self._metadata.index_columns, ascending=ascending, na_position=na_position)) if inplace: self._kdf = ks_.to_dataframe() self._scol = ks_._scol self._index_map = ks_._index_map return None else: return ks_ def corr(self, other, method='pearson'): """ Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- correlation : float Examples -------- >>> df = ks.DataFrame({'s1': [.2, .0, .6, .2], ... 's2': [.3, .6, .0, .1]}) >>> s1 = df.s1 >>> s2 = df.s2 >>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS -0.851064... >>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS -0.948683... Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ # This implementation is suboptimal because it computes more than necessary, # but it should be a start df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]] c = df.corr(method=method) return c.loc["corr_arg1", "corr_arg2"] def nsmallest(self, n: int = 5) -> 'Series': """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Examples -------- >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8] >>> s = ks.Series(data) >>> s 0 1.0 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 6 7.0 7 8.0 Name: 0, dtype: float64 The `n` largest elements where ``n=5`` by default. >>> s.nsmallest() 0 1.0 1 2.0 2 3.0 3 4.0 5 6.0 Name: 0, dtype: float64 >>> s.nsmallest(3) 0 1.0 1 2.0 2 3.0 Name: 0, dtype: float64 """ return _col(self._kdf.nsmallest(n=n, columns=self.name)) def nlargest(self, n: int = 5) -> 'Series': """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. 
Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Examples -------- >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8] >>> s = ks.Series(data) >>> s 0 1.0 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 6 7.0 7 8.0 Name: 0, dtype: float64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() 7 8.0 6 7.0 5 6.0 3 4.0 2 3.0 Name: 0, dtype: float64 >>> s.nlargest(n=3) 7 8.0 6 7.0 5 6.0 Name: 0, dtype: float64 """ return _col(self._kdf.nlargest(n=n, columns=self.name)) def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- nobs : int Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26]}) Notice the uncounted NA values: >>> df['Person'].count() 5 >>> df['Age'].count() 4 """ return self._reduce_for_stat_function(_Frame._count_expr) def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None) -> 'Series': return _col(self.to_dataframe().sample( n=n, frac=frac, replace=replace, random_state=random_state)) sample.__doc__ = DataFrame.sample.__doc__ def apply(self, func, args=(), **kwds): """ Invoke function on values of Series. Can be a Python function that only works on the Series. .. note:: unlike pandas, it is required for `func` to specify return type hint. Parameters ---------- func : function Python function to apply. Note that type hint for return type is required. args : tuple Positional arguments passed to func after the series value. **kwds Additional keyword arguments passed to func. Returns ------- Series Examples -------- Create a Series with typical summer temperatures for each city. >>> s = ks.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 Name: 0, dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x) -> np.int64: ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 Name: 0, dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword >>> def subtract_custom_value(x, custom_value) -> np.int64: ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 Name: 0, dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply`` >>> def add_custom_values(x, **kwargs) -> np.int64: ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 Name: 0, dtype: int64 Use a function from the Numpy library >>> def numpy_log(col) -> np.float64: ... return np.log(col) >>> s.apply(numpy_log) London 2.995732 New York 3.044522 Helsinki 2.484907 Name: 0, dtype: float64 """ assert callable(func), "the first argument should be a callable function." 
spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) if return_sig is None: raise ValueError("Given function must have return type hint; however, not found.") apply_each = wraps(func)(lambda s, *a, **k: s.apply(func, args=a, **k)) wrapped = ks.pandas_wraps(return_col=return_sig)(apply_each) return wrapped(self, *args, **kwds).rename(self.name) def describe(self, percentiles: Optional[List[float]] = None) -> 'Series': return _col(self.to_dataframe().describe(percentiles)) describe.__doc__ = DataFrame.describe.__doc__ def _reduce_for_stat_function(self, sfun): from inspect import signature num_args = len(signature(sfun).parameters) col_sdf = self._scol col_type = self.schema[self.name].dataType if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast('integer') if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) return _unpack_scalar(self._kdf._sdf.select(col_sdf)) def __len__(self): return len(self.to_dataframe()) def __getitem__(self, key): return Series(self._scol.__getitem__(key), anchor=self._kdf, index=self._index_map) def __getattr__(self, item: str) -> Any: if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"): raise AttributeError(item) if hasattr(_MissingPandasLikeSeries, item): property_or_func = getattr(_MissingPandasLikeSeries, item) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) return self.getField(item) def __str__(self): return self._pandas_orig_repr() def __repr__(self): pser = self.head(max_display_count + 1).to_pandas() pser_length = len(pser) repr_string = repr(pser.iloc[:max_display_count]) if pser_length > max_display_count: rest, prev_footer = repr_string.rsplit("\n", 1) match = REPR_PATTERN.search(prev_footer) if match is not None: length = match.group("length") footer = ("\n{prev_footer}\nShowing only the first {length}" .format(length=length, prev_footer=prev_footer)) return rest + footer return repr_string def __dir__(self): if not isinstance(self.schema, StructType): fields = [] else: fields = [f for f in self.schema.fieldNames() if ' ' not in f] return super(Series, self).__dir__() + fields def _pandas_orig_repr(self): # TODO: figure out how to reuse the original one. return 'Column<%s>' % self._scol._jc.toString().encode('utf8') def _unpack_scalar(sdf): """ Takes a dataframe that is supposed to contain a single row with a single scalar value, and returns this value. """ l = sdf.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row.asDict().values()) assert len(l2) == 1, (row, l2) return l2[0] def _col(df): assert isinstance(df, (DataFrame, pd.DataFrame)), type(df) return df[df.columns[0]]
1
9,787
Maybe inline the CachedAccessor code? Is it similar to our lazy property? If yes, can we reconcile the two?
databricks-koalas
py
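For context on the review comment in the record above ("Maybe inline the CachedAccessor code?"): pandas' `CachedAccessor` is a small non-data descriptor that instantiates an accessor class on first attribute access and caches the result on the instance, so it is indeed close in spirit to a lazy property. Below is a minimal sketch of the pattern, simplified from pandas rather than taken from the koalas code; the `DatetimeMethods` binding mirrors the `dt` property in the file above.

```python
class CachedAccessor:
    """Descriptor: build accessor(obj) once, then cache it on the instance."""

    def __init__(self, name, accessor):
        self._name = name          # attribute name the accessor is bound to
        self._accessor = accessor  # accessor class, e.g. DatetimeMethods

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself, e.g. Series.dt
            return self._accessor
        accessor_obj = self._accessor(obj)
        # Shadow the descriptor with the built object; later lookups of
        # obj.<name> hit the cached attribute directly -- the same trick a
        # lazy property uses for a zero-argument method.
        object.__setattr__(obj, self._name, accessor_obj)
        return accessor_obj


class DatetimeMethods:
    def __init__(self, series):
        self._series = series


class Series:
    dt = CachedAccessor("dt", DatetimeMethods)
```

The difference the comment hints at: a lazy property caches the return value of a method, while `CachedAccessor` caches an instantiated accessor object; reconciling the two would amount to expressing one pattern in terms of the other.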
@@ -34,7 +34,7 @@ func (c SmokeTestCase) BuildInputShape(ref *ShapeRef) string { ) } -// AttachSmokeTests attaches the smoke test cases to the API model. + // AttachSmokeTests attaches the smoke test cases to the API model. func (a *API) AttachSmokeTests(filename string) { f, err := os.Open(filename) if err != nil {
1
// +build codegen package api import ( "bytes" "encoding/json" "fmt" "os" "text/template" ) // SmokeTestSuite defines the test suite for smoke tests. type SmokeTestSuite struct { Version int `json:"version"` DefaultRegion string `json:"defaultRegion"` TestCases []SmokeTestCase `json:"testCases"` } // SmokeTestCase provides the definition for a integration smoke test case. type SmokeTestCase struct { OpName string `json:"operationName"` Input map[string]interface{} `json:"input"` ExpectErr bool `json:"errorExpectedFromService"` } // BuildInputShape returns the Go code as a string for initializing the test // case's input shape. func (c SmokeTestCase) BuildInputShape(ref *ShapeRef) string { var b ShapeValueBuilder return fmt.Sprintf("&%s{\n%s\n}", b.GoType(ref, true), b.BuildShape(ref, c.Input, false), ) } // AttachSmokeTests attaches the smoke test cases to the API model. func (a *API) AttachSmokeTests(filename string) { f, err := os.Open(filename) if err != nil { panic(fmt.Sprintf("failed to open smoke tests %s, err: %v", filename, err)) } defer f.Close() if err := json.NewDecoder(f).Decode(&a.SmokeTests); err != nil { panic(fmt.Sprintf("failed to decode smoke tests %s, err: %v", filename, err)) } if v := a.SmokeTests.Version; v != 1 { panic(fmt.Sprintf("invalid smoke test version, %d", v)) } } // APISmokeTestsGoCode returns the Go Code string for the smoke tests. func (a *API) APISmokeTestsGoCode() string { w := bytes.NewBuffer(nil) a.resetImports() a.AddImport("context") a.AddImport("testing") a.AddImport("time") a.AddSDKImport("aws") a.AddSDKImport("aws/request") a.AddSDKImport("aws/awserr") a.AddSDKImport("aws/request") a.AddSDKImport("awstesting/integration") a.AddImport(a.ImportPath()) smokeTests := struct { API *API SmokeTestSuite }{ API: a, SmokeTestSuite: a.SmokeTests, } if err := smokeTestTmpl.Execute(w, smokeTests); err != nil { panic(fmt.Sprintf("failed to create smoke tests, %v", err)) } ignoreImports := ` var _ aws.Config var _ awserr.Error var _ request.Request ` return a.importsGoCode() + ignoreImports + w.String() } var smokeTestTmpl = template.Must(template.New(`smokeTestTmpl`).Parse(` {{- range $i, $testCase := $.TestCases }} {{- $op := index $.API.Operations $testCase.OpName }} func TestInteg_{{ printf "%02d" $i }}_{{ $op.ExportedName }}(t *testing.T) { ctx, cancelFn := context.WithTimeout(context.Background(), 5 *time.Second) defer cancelFn() sess := integration.SessionWithDefaultRegion("{{ $.DefaultRegion }}") svc := {{ $.API.PackageName }}.New(sess) params := {{ $testCase.BuildInputShape $op.InputRef }} _, err := svc.{{ $op.ExportedName }}WithContext(ctx, params) {{- if $testCase.ExpectErr }} if err == nil { t.Fatalf("expect request to fail") } aerr, ok := err.(awserr.RequestFailure) if !ok { t.Fatalf("expect awserr, was %T", err) } if len(aerr.Code()) == 0 { t.Errorf("expect non-empty error code") } if v := aerr.Code(); v == request.ErrCodeSerialization { t.Errorf("expect API error code got serialization failure") } {{- else }} if err != nil { t.Errorf("expect no error, got %v", err) } {{- end }} } {{- end }} `))
1
9,802
Nit: this file has unintended changes.
aws-aws-sdk-go
go
@@ -101,6 +101,8 @@ public class FeedItemMenuHandler { mi.setItemVisibility(R.id.share_download_url_with_position_item, false); } + mi.setItemVisibility(R.id.share_file, selectedItem.getMedia().fileExists()); + if (selectedItem.isPlayed()) { mi.setItemVisibility(R.id.mark_read_item, false); } else {
1
package de.danoeh.antennapod.menuhandler; import android.content.Context; import android.content.Intent; import android.net.Uri; import android.support.annotation.Nullable; import android.util.Log; import android.widget.Toast; import de.danoeh.antennapod.R; import de.danoeh.antennapod.core.feed.FeedItem; import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction; import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction.Action; import de.danoeh.antennapod.core.preferences.GpodnetPreferences; import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.core.service.playback.PlaybackService; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.DownloadRequestException; import de.danoeh.antennapod.core.util.IntentUtils; import de.danoeh.antennapod.core.util.LongList; import de.danoeh.antennapod.core.util.ShareUtils; /** * Handles interactions with the FeedItemMenu. */ public class FeedItemMenuHandler { private static final String TAG = "FeedItemMenuHandler"; private FeedItemMenuHandler() { } /** * Used by the MenuHandler to access different types of menus through one * interface */ public interface MenuInterface { /** * Implementations of this method should call findItem(id) on their * menu-object and call setVisibility(visibility) on the returned * MenuItem object. */ void setItemVisibility(int id, boolean visible); } /** * This method should be called in the prepare-methods of menus. It changes * the visibility of the menu items depending on a FeedItem's attributes. * * @param mi An instance of MenuInterface that the method uses to change a * MenuItem's visibility * @param selectedItem The FeedItem for which the menu is supposed to be prepared * @param showExtendedMenu True if MenuItems that let the user share information about * the FeedItem and visit its website should be set visible. This * parameter should be set to false if the menu space is limited. * @param queueAccess Used for testing if the queue contains the selected item; only used for * move to top/bottom in the queue * @return Returns true if selectedItem is not null. 
*/ public static boolean onPrepareMenu(MenuInterface mi, FeedItem selectedItem, boolean showExtendedMenu, @Nullable LongList queueAccess) { if (selectedItem == null) { return false; } boolean hasMedia = selectedItem.getMedia() != null; boolean isPlaying = hasMedia && selectedItem.getState() == FeedItem.State.PLAYING; if (!isPlaying) { mi.setItemVisibility(R.id.skip_episode_item, false); } boolean isInQueue = selectedItem.isTagged(FeedItem.TAG_QUEUE); if(queueAccess == null || queueAccess.size() == 0 || queueAccess.get(0) == selectedItem.getId()) { mi.setItemVisibility(R.id.move_to_top_item, false); } if(queueAccess == null || queueAccess.size() == 0 || queueAccess.get(queueAccess.size()-1) == selectedItem.getId()) { mi.setItemVisibility(R.id.move_to_bottom_item, false); } if (!isInQueue) { mi.setItemVisibility(R.id.remove_from_queue_item, false); } if (!(!isInQueue && selectedItem.getMedia() != null)) { mi.setItemVisibility(R.id.add_to_queue_item, false); } if (!showExtendedMenu || selectedItem.getLink() == null) { mi.setItemVisibility(R.id.visit_website_item, false); mi.setItemVisibility(R.id.share_link_item, false); mi.setItemVisibility(R.id.share_link_with_position_item, false); } if (!showExtendedMenu || !hasMedia || selectedItem.getMedia().getDownload_url() == null) { mi.setItemVisibility(R.id.share_download_url_item, false); mi.setItemVisibility(R.id.share_download_url_with_position_item, false); } if(!hasMedia || selectedItem.getMedia().getPosition() <= 0) { mi.setItemVisibility(R.id.share_link_with_position_item, false); mi.setItemVisibility(R.id.share_download_url_with_position_item, false); } if (selectedItem.isPlayed()) { mi.setItemVisibility(R.id.mark_read_item, false); } else { mi.setItemVisibility(R.id.mark_unread_item, false); } if(selectedItem.getMedia() == null || selectedItem.getMedia().getPosition() == 0) { mi.setItemVisibility(R.id.reset_position, false); } if(!UserPreferences.isEnableAutodownload()) { mi.setItemVisibility(R.id.activate_auto_download, false); mi.setItemVisibility(R.id.deactivate_auto_download, false); } else if(selectedItem.getAutoDownload()) { mi.setItemVisibility(R.id.activate_auto_download, false); } else { mi.setItemVisibility(R.id.deactivate_auto_download, false); } if (selectedItem.getPaymentLink() == null || !selectedItem.getFlattrStatus().flattrable()) { mi.setItemVisibility(R.id.support_item, false); } boolean isFavorite = selectedItem.isTagged(FeedItem.TAG_FAVORITE); mi.setItemVisibility(R.id.add_to_favorites_item, !isFavorite); mi.setItemVisibility(R.id.remove_from_favorites_item, isFavorite); return true; } /** * The same method as onPrepareMenu(MenuInterface, FeedItem, boolean, QueueAccess), but lets the * caller also specify a list of menu items that should not be shown. * * @param excludeIds Menu item that should be excluded * @return true if selectedItem is not null. */ public static boolean onPrepareMenu(MenuInterface mi, FeedItem selectedItem, boolean showExtendedMenu, LongList queueAccess, int... 
excludeIds) { boolean rc = onPrepareMenu(mi, selectedItem, showExtendedMenu, queueAccess); if (rc && excludeIds != null) { for (int id : excludeIds) { mi.setItemVisibility(id, false); } } return rc; } public static boolean onMenuItemClicked(Context context, int menuItemId, FeedItem selectedItem) throws DownloadRequestException { switch (menuItemId) { case R.id.skip_episode_item: context.sendBroadcast(new Intent(PlaybackService.ACTION_SKIP_CURRENT_EPISODE)); break; case R.id.remove_item: DBWriter.deleteFeedMediaOfItem(context, selectedItem.getMedia().getId()); break; case R.id.mark_read_item: selectedItem.setPlayed(true); DBWriter.markItemPlayed(selectedItem, FeedItem.PLAYED, false); if(GpodnetPreferences.loggedIn()) { FeedMedia media = selectedItem.getMedia(); // not all items have media, Gpodder only cares about those that do if (media != null) { GpodnetEpisodeAction actionPlay = new GpodnetEpisodeAction.Builder(selectedItem, Action.PLAY) .currentDeviceId() .currentTimestamp() .started(media.getDuration() / 1000) .position(media.getDuration() / 1000) .total(media.getDuration() / 1000) .build(); GpodnetPreferences.enqueueEpisodeAction(actionPlay); } } break; case R.id.mark_unread_item: selectedItem.setPlayed(false); DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, false); if(GpodnetPreferences.loggedIn() && selectedItem.getMedia() != null) { GpodnetEpisodeAction actionNew = new GpodnetEpisodeAction.Builder(selectedItem, Action.NEW) .currentDeviceId() .currentTimestamp() .build(); GpodnetPreferences.enqueueEpisodeAction(actionNew); } break; case R.id.add_to_queue_item: DBWriter.addQueueItem(context, selectedItem); break; case R.id.remove_from_queue_item: DBWriter.removeQueueItem(context, selectedItem, true); break; case R.id.add_to_favorites_item: DBWriter.addFavoriteItem(selectedItem); break; case R.id.remove_from_favorites_item: DBWriter.removeFavoriteItem(selectedItem); break; case R.id.reset_position: selectedItem.getMedia().setPosition(0); DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, true); break; case R.id.activate_auto_download: selectedItem.setAutoDownload(true); DBWriter.setFeedItemAutoDownload(selectedItem, true); break; case R.id.deactivate_auto_download: selectedItem.setAutoDownload(false); DBWriter.setFeedItemAutoDownload(selectedItem, false); break; case R.id.visit_website_item: Uri uri = Uri.parse(selectedItem.getLink()); Intent intent = new Intent(Intent.ACTION_VIEW, uri); if(IntentUtils.isCallable(context, intent)) { context.startActivity(intent); } else { Toast.makeText(context, context.getString(R.string.download_error_malformed_url), Toast.LENGTH_SHORT).show(); } break; case R.id.support_item: DBTasks.flattrItemIfLoggedIn(context, selectedItem); break; case R.id.share_link_item: ShareUtils.shareFeedItemLink(context, selectedItem); break; case R.id.share_download_url_item: ShareUtils.shareFeedItemDownloadLink(context, selectedItem); break; case R.id.share_link_with_position_item: ShareUtils.shareFeedItemLink(context, selectedItem, true); break; case R.id.share_download_url_with_position_item: ShareUtils.shareFeedItemDownloadLink(context, selectedItem, true); break; default: Log.d(TAG, "Unknown menuItemId: " + menuItemId); return false; } // Refresh menu state return true; } }
1
13,518
Potential NPE? `hasMedia && selectedItem...`
AntennaPod-AntennaPod
java
@@ -1662,7 +1662,7 @@ class EC2Connection(AWSQueryConnection): params['AutoEnableIO.Value'] = new_value return self.get_status('ModifyVolumeAttribute', params, verb='POST') - def create_volume(self, size, zone, snapshot=None, + def create_volume(self, size, zone = None, snapshot=None, volume_type=None, iops=None): """ Create a new EBS Volume.
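A minimal usage sketch of the relaxed signature, assuming `conn` is an existing `EC2Connection`; the size, zone, and snapshot id below are illustrative placeholders, not values taken from the patch:

    # Previously a zone argument was always required:
    vol = conn.create_volume(100, 'us-east-1d')
    # With this change the zone defaults to None, so snapshot-based
    # calls can omit it (whether the service accepts a missing zone
    # is up to the EC2 API, not shown here):
    vol = conn.create_volume(100, snapshot='snap-1a2b3c4d')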
1
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Represents a connection to the EC2 service. """ import base64 import warnings from datetime import datetime from datetime import timedelta import boto from boto.connection import AWSQueryConnection from boto.resultset import ResultSet from boto.ec2.image import Image, ImageAttribute from boto.ec2.instance import Reservation, Instance from boto.ec2.instance import ConsoleOutput, InstanceAttribute from boto.ec2.keypair import KeyPair from boto.ec2.address import Address from boto.ec2.volume import Volume, VolumeAttribute from boto.ec2.snapshot import Snapshot from boto.ec2.snapshot import SnapshotAttribute from boto.ec2.zone import Zone from boto.ec2.securitygroup import SecurityGroup from boto.ec2.regioninfo import RegionInfo from boto.ec2.instanceinfo import InstanceInfo from boto.ec2.reservedinstance import ReservedInstancesOffering from boto.ec2.reservedinstance import ReservedInstance from boto.ec2.reservedinstance import ReservedInstanceListing from boto.ec2.spotinstancerequest import SpotInstanceRequest from boto.ec2.spotpricehistory import SpotPriceHistory from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription from boto.ec2.bundleinstance import BundleInstanceTask from boto.ec2.placementgroup import PlacementGroup from boto.ec2.tag import Tag from boto.ec2.vmtype import VmType from boto.ec2.instancestatus import InstanceStatusSet from boto.ec2.volumestatus import VolumeStatusSet from boto.ec2.networkinterface import NetworkInterface from boto.exception import EC2ResponseError #boto.set_stream_logger('ec2') class EC2Connection(AWSQueryConnection): APIVersion = boto.config.get('Boto', 'ec2_version', '2012-12-01') DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', 'ec2.us-east-1.amazonaws.com') ResponseError = EC2ResponseError def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, host=None, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, validate_certs=True): """ Init method to create a new connection to EC2. 
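
        A minimal construction sketch (hedged: the key strings below are
        illustrative placeholders, not real credentials):

            >>> from boto.ec2.connection import EC2Connection
            >>> conn = EC2Connection('<aws_access_key_id>',
            ...                      '<aws_secret_access_key>')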
""" if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, validate_certs=validate_certs) if api_version: self.APIVersion = api_version def _required_auth_capability(self): return ['ec2'] def get_params(self): """ Returns a dictionary containing the value of of all of the keyword arguments passed when constructing this connection. """ param_names = ['aws_access_key_id', 'aws_secret_access_key', 'is_secure', 'port', 'proxy', 'proxy_port', 'proxy_user', 'proxy_pass', 'debug', 'https_connection_factory'] params = {} for name in param_names: params[name] = getattr(self, name) return params def build_filter_params(self, params, filters): i = 1 for name in filters: aws_name = name if not aws_name.startswith('tag:'): aws_name = name.replace('_', '-') params['Filter.%d.Name' % i] = aws_name value = filters[name] if not isinstance(value, list): value = [value] j = 1 for v in value: params['Filter.%d.Value.%d' % (i, j)] = v j += 1 i += 1 # Image methods def get_all_images(self, image_ids=None, owners=None, executable_by=None, filters=None): """ Retrieve all the EC2 images available on your account. :type image_ids: list :param image_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :type executable_by: list :param executable_by: Returns AMIs for which the specified user ID has explicit launch permissions :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.image.Image` """ params = {} if image_ids: self.build_list_params(params, image_ids, 'ImageId') if owners: self.build_list_params(params, owners, 'Owner') if executable_by: self.build_list_params(params, executable_by, 'ExecutableBy') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeImages', params, [('item', Image)], verb='POST') def get_all_kernels(self, kernel_ids=None, owners=None): """ Retrieve all the EC2 kernels available on your account. Constructs a filter to allow the processing to happen server side. :type kernel_ids: list :param kernel_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :rtype: list :return: A list of :class:`boto.ec2.image.Image` """ params = {} if kernel_ids: self.build_list_params(params, kernel_ids, 'ImageId') if owners: self.build_list_params(params, owners, 'Owner') filter = {'image-type': 'kernel'} self.build_filter_params(params, filter) return self.get_list('DescribeImages', params, [('item', Image)], verb='POST') def get_all_ramdisks(self, ramdisk_ids=None, owners=None): """ Retrieve all the EC2 ramdisks available on your account. Constructs a filter to allow the processing to happen server side. 
:type ramdisk_ids: list :param ramdisk_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :rtype: list :return: A list of :class:`boto.ec2.image.Image` """ params = {} if ramdisk_ids: self.build_list_params(params, ramdisk_ids, 'ImageId') if owners: self.build_list_params(params, owners, 'Owner') filter = {'image-type': 'ramdisk'} self.build_filter_params(params, filter) return self.get_list('DescribeImages', params, [('item', Image)], verb='POST') def get_image(self, image_id): """ Shortcut method to retrieve a specific image (AMI). :type image_id: string :param image_id: the ID of the Image to retrieve :rtype: :class:`boto.ec2.image.Image` :return: The EC2 Image specified or None if the image is not found """ try: return self.get_all_images(image_ids=[image_id])[0] except IndexError: # None of those images available return None def register_image(self, name=None, description=None, image_location=None, architecture=None, kernel_id=None, ramdisk_id=None, root_device_name=None, block_device_map=None): """ Register an image. :type name: string :param name: The name of the AMI. Valid only for EBS-based images. :type description: string :param description: The description of the AMI. :type image_location: string :param image_location: Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI's. :type architecture: string :param architecture: The architecture of the AMI. Valid choices are: * i386 * x86_64 :type kernel_id: string :param kernel_id: The ID of the kernel with which to launch the instances :type root_device_name: string :param root_device_name: The root device name (e.g. /dev/sdh) :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` :param block_device_map: A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. :rtype: string :return: The new image id """ params = {} if name: params['Name'] = name if description: params['Description'] = description if architecture: params['Architecture'] = architecture if kernel_id: params['KernelId'] = kernel_id if ramdisk_id: params['RamdiskId'] = ramdisk_id if image_location: params['ImageLocation'] = image_location if root_device_name: params['RootDeviceName'] = root_device_name if block_device_map: block_device_map.build_list_params(params) rs = self.get_object('RegisterImage', params, ResultSet, verb='POST') image_id = getattr(rs, 'imageId', None) return image_id def deregister_image(self, image_id, delete_snapshot=False): """ Unregister an AMI. :type image_id: string :param image_id: the ID of the Image to unregister :type delete_snapshot: bool :param delete_snapshot: Set to True if we should delete the snapshot associated with an EBS volume mounted at /dev/sda1 :rtype: bool :return: True if successful """ snapshot_id = None if delete_snapshot: image = self.get_image(image_id) for key in image.block_device_mapping: if key == "/dev/sda1": snapshot_id = image.block_device_mapping[key].snapshot_id break result = self.get_status('DeregisterImage', {'ImageId':image_id}, verb='POST') if result and snapshot_id: return result and self.delete_snapshot(snapshot_id) return result def create_image(self, instance_id, name, description=None, no_reboot=False): """ Will create an AMI from the instance in the running or stopped state. :type instance_id: string :param instance_id: the ID of the instance to image. 
:type name: string :param name: The name of the new image :type description: string :param description: An optional human-readable string describing the contents and purpose of the AMI. :type no_reboot: bool :param no_reboot: An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. :rtype: string :return: The new image id """ params = {'InstanceId': instance_id, 'Name': name} if description: params['Description'] = description if no_reboot: params['NoReboot'] = 'true' img = self.get_object('CreateImage', params, Image, verb='POST') return img.id # ImageAttribute methods def get_image_attribute(self, image_id, attribute='launchPermission'): """ Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested """ params = {'ImageId': image_id, 'Attribute': attribute} return self.get_object('DescribeImageAttribute', params, ImageAttribute, verb='POST') def modify_image_attribute(self, image_id, attribute='launchPermission', operation='add', user_ids=None, groups=None, product_codes=None): """ Changes an attribute of an image. :type image_id: string :param image_id: The image id you wish to change :type attribute: string :param attribute: The attribute you wish to change :type operation: string :param operation: Either add or remove (this is required for changing launchPermissions) :type user_ids: list :param user_ids: The Amazon IDs of users to add/remove attributes :type groups: list :param groups: The groups to add/remove attributes :type product_codes: list :param product_codes: Amazon DevPay product code. Currently only one product code can be associated with an AMI. Once set, the product code cannot be changed or reset. """ params = {'ImageId': image_id, 'Attribute': attribute, 'OperationType': operation} if user_ids: self.build_list_params(params, user_ids, 'UserId') if groups: self.build_list_params(params, groups, 'UserGroup') if product_codes: self.build_list_params(params, product_codes, 'ProductCode') return self.get_status('ModifyImageAttribute', params, verb='POST') def reset_image_attribute(self, image_id, attribute='launchPermission'): """ Resets an attribute of an AMI to its default value. :type image_id: string :param image_id: ID of the AMI for which an attribute will be described :type attribute: string :param attribute: The attribute to reset :rtype: bool :return: Whether the operation succeeded or not """ params = {'ImageId': image_id, 'Attribute': attribute} return self.get_status('ResetImageAttribute', params, verb='POST') # Instance methods def get_all_instances(self, instance_ids=None, filters=None): """ Retrieve all the instances associated with your account. :type instance_ids: list :param instance_ids: A list of strings of instance IDs :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. 
Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.instance.Reservation` """ params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') if filters: if 'group-id' in filters: gid = filters.get('group-id') if not gid.startswith('sg-') or len(gid) != 11: warnings.warn( "The group-id filter now requires a security group " "identifier (sg-*) instead of a group name. To filter " "by group name use the 'group-name' filter instead.", UserWarning) self.build_filter_params(params, filters) return self.get_list('DescribeInstances', params, [('item', Reservation)], verb='POST') def get_all_instance_status(self, instance_ids=None, max_results=None, next_token=None, filters=None): """ Retrieve all the instances in your account scheduled for maintenance. :type instance_ids: list :param instance_ids: A list of strings of instance IDs :type max_results: int :param max_results: The maximum number of paginated instance items per response. :type next_token: str :param next_token: A string specifying the next paginated set of results to return. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of instances that have maintenance scheduled. """ params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') if max_results: params['MaxResults'] = max_results if next_token: params['NextToken'] = next_token if filters: self.build_filter_params(params, filters) return self.get_object('DescribeInstanceStatus', params, InstanceStatusSet, verb='POST') def run_instances(self, image_id, min_count=1, max_count=1, key_name=None, security_groups=None, user_data=None, addressing_type=None, instance_type='m1.small', placement=None, kernel_id=None, ramdisk_id=None, monitoring_enabled=False, subnet_id=None, block_device_map=None, disable_api_termination=False, instance_initiated_shutdown_behavior=None, private_ip_address=None, placement_group=None, client_token=None, security_group_ids=None, additional_info=None, instance_profile_name=None, instance_profile_arn=None, tenancy=None, ebs_optimized=False, network_interfaces=None): """ Runs an image on EC2. :type image_id: string :param image_id: The ID of the image to run. :type min_count: int :param min_count: The minimum number of instances to launch. :type max_count: int :param max_count: The maximum number of instances to launch. :type key_name: string :param key_name: The name of the key pair with which to launch instances. :type security_groups: list of strings :param security_groups: The names of the security groups with which to associate instances :type user_data: string :param user_data: The user data passed to the launched instances :type instance_type: string :param instance_type: The type of instance to run: * t1.micro * m1.small * m1.medium * m1.large * m1.xlarge * c1.medium * c1.xlarge * m2.xlarge * m2.2xlarge * m2.4xlarge * cc1.4xlarge * cg1.4xlarge * cc2.8xlarge :type placement: string :param placement: The availability zone in which to launch the instances. :type kernel_id: string :param kernel_id: The ID of the kernel with which to launch the instances. :type ramdisk_id: string :param ramdisk_id: The ID of the RAM disk with which to launch the instances. 
:type monitoring_enabled: bool :param monitoring_enabled: Enable CloudWatch monitoring on the instance. :type subnet_id: string :param subnet_id: The subnet ID within which to launch the instances for VPC. :type private_ip_address: string :param private_ip_address: If you're using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` :param block_device_map: A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. :type disable_api_termination: bool :param disable_api_termination: If True, the instances will be locked and will not be able to be terminated via the API. :type instance_initiated_shutdown_behavior: string :param instance_initiated_shutdown_behavior: Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: * stop * terminate :type placement_group: string :param placement_group: If specified, this is the name of the placement group in which the instance(s) will be launched. :type client_token: string :param client_token: Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. :type security_group_ids: list of strings :param security_group_ids: The ID of the VPC security groups with which to associate instances. :type additional_info: string :param additional_info: Specifies additional information to make available to the instance(s). :type tenancy: string :param tenancy: The tenancy of the instance you want to launch. An instance with a tenancy of 'dedicated' runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. :type instance_profile_arn: string :param instance_profile_arn: The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. :type instance_profile_name: string :param instance_profile_name: The name of the IAM Instance Profile (IIP) to associate with the instances. :type ebs_optimized: bool :param ebs_optimized: Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. 
:type network_interfaces: list :param network_interfaces: A list of :class:`boto.ec2.networkinterface.NetworkInterfaceSpecification` :rtype: Reservation :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines """ params = {'ImageId': image_id, 'MinCount': min_count, 'MaxCount': max_count} if key_name: params['KeyName'] = key_name if security_group_ids: l = [] for group in security_group_ids: if isinstance(group, SecurityGroup): l.append(group.id) else: l.append(group) self.build_list_params(params, l, 'SecurityGroupId') if security_groups: l = [] for group in security_groups: if isinstance(group, SecurityGroup): l.append(group.name) else: l.append(group) self.build_list_params(params, l, 'SecurityGroup') if user_data: params['UserData'] = base64.b64encode(user_data) if addressing_type: params['AddressingType'] = addressing_type if instance_type: params['InstanceType'] = instance_type if placement: params['Placement.AvailabilityZone'] = placement if placement_group: params['Placement.GroupName'] = placement_group if tenancy: params['Placement.Tenancy'] = tenancy if kernel_id: params['KernelId'] = kernel_id if ramdisk_id: params['RamdiskId'] = ramdisk_id if monitoring_enabled: params['Monitoring.Enabled'] = 'true' if subnet_id: params['SubnetId'] = subnet_id if private_ip_address: params['PrivateIpAddress'] = private_ip_address if block_device_map: block_device_map.build_list_params(params) if disable_api_termination: params['DisableApiTermination'] = 'true' if instance_initiated_shutdown_behavior: val = instance_initiated_shutdown_behavior params['InstanceInitiatedShutdownBehavior'] = val if client_token: params['ClientToken'] = client_token if additional_info: params['AdditionalInfo'] = additional_info if instance_profile_name: params['IamInstanceProfile.Name'] = instance_profile_name if instance_profile_arn: params['IamInstanceProfile.Arn'] = instance_profile_arn if ebs_optimized: params['EbsOptimized'] = 'true' if network_interfaces: network_interfaces.build_list_params(params) return self.get_object('RunInstances', params, Reservation, verb='POST') def terminate_instances(self, instance_ids=None): """ Terminate the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to terminate :rtype: list :return: A list of the instances terminated """ params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('TerminateInstances', params, [('item', Instance)], verb='POST') def stop_instances(self, instance_ids=None, force=False): """ Stop the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to stop :type force: bool :param force: Forces the instance to stop :rtype: list :return: A list of the instances stopped """ params = {} if force: params['Force'] = 'true' if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('StopInstances', params, [('item', Instance)], verb='POST') def start_instances(self, instance_ids=None): """ Start the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to start :rtype: list :return: A list of the instances started """ params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('StartInstances', params, [('item', Instance)], verb='POST') def get_console_output(self, instance_id): """ Retrieves the console output for the specified instance. 
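        Illustrative usage, assuming ``conn`` is an existing
        :class:`EC2Connection` (the instance id is a placeholder):

            >>> output = conn.get_console_output('i-1a2b3c4d')
            >>> print output.output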
        :type instance_id: string
        :param instance_id: The instance ID of a running instance on the cloud.

        :rtype: :class:`boto.ec2.instance.ConsoleOutput`
        :return: The console output as a ConsoleOutput object
        """
        params = {}
        self.build_list_params(params, [instance_id], 'InstanceId')
        return self.get_object('GetConsoleOutput', params,
                               ConsoleOutput, verb='POST')

    def reboot_instances(self, instance_ids=None):
        """
        Reboot the specified instances.

        :type instance_ids: list
        :param instance_ids: The instances to reboot
        """
        params = {}
        if instance_ids:
            self.build_list_params(params, instance_ids, 'InstanceId')
        return self.get_status('RebootInstances', params)

    def confirm_product_instance(self, product_code, instance_id):
        params = {'ProductCode': product_code,
                  'InstanceId': instance_id}
        rs = self.get_object('ConfirmProductInstance', params,
                             ResultSet, verb='POST')
        return (rs.status, rs.ownerId)

    # InstanceAttribute methods

    def get_instance_attribute(self, instance_id, attribute):
        """
        Gets an attribute from an instance.

        :type instance_id: string
        :param instance_id: The Amazon id of the instance

        :type attribute: string
        :param attribute: The attribute you need information about
                          Valid choices are:

                          * instanceType
                          * kernel
                          * ramdisk
                          * userData
                          * disableApiTermination
                          * instanceInitiatedShutdownBehavior
                          * rootDeviceName
                          * blockDeviceMapping
                          * productCodes
                          * sourceDestCheck
                          * groupSet
                          * ebsOptimized

        :rtype: :class:`boto.ec2.image.InstanceAttribute`
        :return: An InstanceAttribute object representing the value of the
                 attribute requested
        """
        params = {'InstanceId': instance_id}
        if attribute:
            params['Attribute'] = attribute
        return self.get_object('DescribeInstanceAttribute', params,
                               InstanceAttribute, verb='POST')

    def modify_instance_attribute(self, instance_id, attribute, value):
        """
        Changes an attribute of an instance

        :type instance_id: string
        :param instance_id: The instance id you wish to change

        :type attribute: string
        :param attribute: The attribute you wish to change.
                          * instanceType - A valid instance type (m1.small)
                          * kernel - Kernel ID (None)
                          * ramdisk - Ramdisk ID (None)
                          * userData - Base64 encoded String (None)
                          * disableApiTermination - Boolean (true)
                          * instanceInitiatedShutdownBehavior - stop|terminate
                          * blockDeviceMapping - List of strings - ie: ['/dev/sda=false']
                          * sourceDestCheck - Boolean (true)
                          * groupSet - Set of Security Groups or IDs
                          * ebsOptimized - Boolean (false)

        :type value: string
        :param value: The new value for the attribute

        :rtype: bool
        :return: Whether the operation succeeded or not
        """
        # Allow a bool to be passed in for value of disableApiTermination
        bool_reqs = ('disableapitermination',
                     'sourcedestcheck',
                     'ebsoptimized')
        if attribute.lower() in bool_reqs:
            if isinstance(value, bool):
                if value:
                    value = 'true'
                else:
                    value = 'false'

        params = {'InstanceId': instance_id}

        # groupSet is handled differently from other arguments
        if attribute.lower() == 'groupset':
            for idx, sg in enumerate(value):
                if isinstance(sg, SecurityGroup):
                    sg = sg.id
                params['GroupId.%s' % (idx + 1)] = sg
        elif attribute.lower() == 'blockdevicemapping':
            for idx, kv in enumerate(value):
                dev_name, _, flag = kv.partition('=')
                pre = 'BlockDeviceMapping.%d' % (idx + 1)
                params['%s.DeviceName' % pre] = dev_name
                params['%s.Ebs.DeleteOnTermination' % pre] = flag or 'true'
        else:
            # for backwards compatibility handle lowercase first letter
            attribute = attribute[0].upper() + attribute[1:]
            params['%s.Value' % attribute] = value

        return self.get_status('ModifyInstanceAttribute', params, verb='POST')

    def reset_instance_attribute(self, instance_id, attribute):
        """
        Resets an attribute of an instance to its default value.

        :type instance_id: string
        :param instance_id: ID of the instance

        :type attribute: string
        :param attribute: The attribute to reset. Valid values are:
                          kernel|ramdisk

        :rtype: bool
        :return: Whether the operation succeeded or not
        """
        params = {'InstanceId': instance_id,
                  'Attribute': attribute}
        return self.get_status('ResetInstanceAttribute', params, verb='POST')

    # Spot Instances

    def get_all_spot_instance_requests(self, request_ids=None,
                                       filters=None):
        """
        Retrieve all the spot instance requests associated with your account.

        :type request_ids: list
        :param request_ids: A list of strings of spot instance request IDs

        :type filters: dict
        :param filters: Optional filters that can be used to limit the
                        results returned.  Filters are provided
                        in the form of a dictionary consisting of
                        filter names as the key and filter values
                        as the value.  The set of allowable filter
                        names/values is dependent on the request
                        being performed.  Check the EC2 API guide
                        for details.

        :rtype: list
        :return: A list of
                 :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
        """
        params = {}
        if request_ids:
            self.build_list_params(params, request_ids,
                                   'SpotInstanceRequestId')
        if filters:
            if 'launch.group-id' in filters:
                lgid = filters.get('launch.group-id')
                if not lgid.startswith('sg-') or len(lgid) != 11:
                    warnings.warn(
                        "The 'launch.group-id' filter now requires a "
                        "security group id (sg-*) and no longer supports "
                        "filtering by group name. Please update your "
                        "filters accordingly.", UserWarning)
            self.build_filter_params(params, filters)
        return self.get_list('DescribeSpotInstanceRequests', params,
                             [('item', SpotInstanceRequest)], verb='POST')

    def get_spot_price_history(self, start_time=None, end_time=None,
                               instance_type=None, product_description=None,
                               availability_zone=None):
        """
        Retrieve the recent history of spot instance pricing.
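        Illustrative usage, assuming ``conn`` is an existing
        :class:`EC2Connection`:

            >>> history = conn.get_spot_price_history(
            ...     instance_type='m1.small',
            ...     product_description='Linux/UNIX')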
        :type start_time: str
        :param start_time: An indication of how far back to provide price
                           changes for.  An ISO8601 DateTime string.

        :type end_time: str
        :param end_time: An indication of how far forward to provide price
                         changes for.  An ISO8601 DateTime string.

        :type instance_type: str
        :param instance_type: Filter responses to a particular instance type.

        :type product_description: str
        :param product_description: Filter responses to a particular platform.
                                    Valid values are currently:

                                    * Linux/UNIX
                                    * SUSE Linux
                                    * Windows
                                    * Linux/UNIX (Amazon VPC)
                                    * SUSE Linux (Amazon VPC)
                                    * Windows (Amazon VPC)

        :type availability_zone: str
        :param availability_zone: The availability zone for which prices
                                  should be returned.  If not specified,
                                  data for all availability zones will
                                  be returned.

        :rtype: list
        :return: A list of tuples containing price and timestamp.
        """
        params = {}
        if start_time:
            params['StartTime'] = start_time
        if end_time:
            params['EndTime'] = end_time
        if instance_type:
            params['InstanceType'] = instance_type
        if product_description:
            params['ProductDescription'] = product_description
        if availability_zone:
            params['AvailabilityZone'] = availability_zone
        return self.get_list('DescribeSpotPriceHistory', params,
                             [('item', SpotPriceHistory)], verb='POST')

    def request_spot_instances(self, price, image_id, count=1,
                               type='one-time', valid_from=None,
                               valid_until=None, launch_group=None,
                               availability_zone_group=None, key_name=None,
                               security_groups=None, user_data=None,
                               addressing_type=None,
                               instance_type='m1.small', placement=None,
                               kernel_id=None, ramdisk_id=None,
                               monitoring_enabled=False, subnet_id=None,
                               placement_group=None, block_device_map=None,
                               instance_profile_arn=None,
                               instance_profile_name=None,
                               security_group_ids=None,
                               ebs_optimized=False, network_interfaces=None):
        """
        Request instances on the spot market at a particular price.

        :type price: str
        :param price: The maximum price of your bid

        :type image_id: string
        :param image_id: The ID of the image to run

        :type count: int
        :param count: The number of instances to request

        :type type: str
        :param type: Type of request. Can be 'one-time' or 'persistent'.
                     Default is one-time.

        :type valid_from: str
        :param valid_from: Start date of the request. An ISO8601 time string.

        :type valid_until: str
        :param valid_until: End date of the request.  An ISO8601 time string.

        :type launch_group: str
        :param launch_group: If supplied, all requests will be fulfilled
                             as a group.

        :type availability_zone_group: str
        :param availability_zone_group: If supplied, all requests will be
                                        fulfilled within a single
                                        availability zone.

        :type key_name: string
        :param key_name: The name of the key pair with which to launch
                         instances

        :type security_groups: list of strings
        :param security_groups: The names of the security groups with which to
                                associate instances

        :type user_data: string
        :param user_data: The user data passed to the launched instances

        :type instance_type: string
        :param instance_type: The type of instance to run:

                              * m1.small
                              * m1.large
                              * m1.xlarge
                              * c1.medium
                              * c1.xlarge
                              * m2.xlarge
                              * m2.2xlarge
                              * m2.4xlarge
                              * cc1.4xlarge
                              * t1.micro

        :type placement: string
        :param placement: The availability zone in which to launch
                          the instances

        :type kernel_id: string
        :param kernel_id: The ID of the kernel with which to launch the
                          instances

        :type ramdisk_id: string
        :param ramdisk_id: The ID of the RAM disk with which to launch the
                           instances

        :type monitoring_enabled: bool
        :param monitoring_enabled: Enable CloudWatch monitoring on
                                   the instance.
:type subnet_id: string :param subnet_id: The subnet ID within which to launch the instances for VPC. :type placement_group: string :param placement_group: If specified, this is the name of the placement group in which the instance(s) will be launched. :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` :param block_device_map: A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. :type security_group_ids: list of strings :param security_group_ids: The ID of the VPC security groups with which to associate instances. :type instance_profile_arn: string :param instance_profile_arn: The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. :type instance_profile_name: string :param instance_profile_name: The name of the IAM Instance Profile (IIP) to associate with the instances. :type ebs_optimized: bool :param ebs_optimized: Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. :type network_interfaces: list :param network_interfaces: A list of :class:`boto.ec2.networkinterface.NetworkInterfaceSpecification` :rtype: Reservation :return: The :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest` associated with the request for machines """ ls = 'LaunchSpecification' params = {'%s.ImageId' % ls: image_id, 'Type': type, 'SpotPrice': price} if count: params['InstanceCount'] = count if valid_from: params['ValidFrom'] = valid_from if valid_until: params['ValidUntil'] = valid_until if launch_group: params['LaunchGroup'] = launch_group if availability_zone_group: params['AvailabilityZoneGroup'] = availability_zone_group if key_name: params['%s.KeyName' % ls] = key_name if security_group_ids: l = [] for group in security_group_ids: if isinstance(group, SecurityGroup): l.append(group.id) else: l.append(group) self.build_list_params(params, l, '%s.SecurityGroupId' % ls) if security_groups: l = [] for group in security_groups: if isinstance(group, SecurityGroup): l.append(group.name) else: l.append(group) self.build_list_params(params, l, '%s.SecurityGroup' % ls) if user_data: params['%s.UserData' % ls] = base64.b64encode(user_data) if addressing_type: params['%s.AddressingType' % ls] = addressing_type if instance_type: params['%s.InstanceType' % ls] = instance_type if placement: params['%s.Placement.AvailabilityZone' % ls] = placement if kernel_id: params['%s.KernelId' % ls] = kernel_id if ramdisk_id: params['%s.RamdiskId' % ls] = ramdisk_id if monitoring_enabled: params['%s.Monitoring.Enabled' % ls] = 'true' if subnet_id: params['%s.SubnetId' % ls] = subnet_id if placement_group: params['%s.Placement.GroupName' % ls] = placement_group if block_device_map: block_device_map.build_list_params(params, '%s.' % ls) if instance_profile_name: params['%s.IamInstanceProfile.Name' % ls] = instance_profile_name if instance_profile_arn: params['%s.IamInstanceProfile.Arn' % ls] = instance_profile_arn if ebs_optimized: params['%s.EbsOptimized' % ls] = 'true' if network_interfaces: network_interfaces.build_list_params(params, prefix=ls + '.') return self.get_list('RequestSpotInstances', params, [('item', SpotInstanceRequest)], verb='POST') def cancel_spot_instance_requests(self, request_ids): """ Cancel the specified Spot Instance Requests. 
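        Illustrative usage, assuming ``conn`` is an existing
        :class:`EC2Connection` (the request id is a placeholder):

            >>> conn.cancel_spot_instance_requests(['sir-1a2b3c4d'])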
        :type request_ids: list
        :param request_ids: A list of strings of the Request IDs to cancel

        :rtype: list
        :return: A list of the spot instance requests canceled
        """
        params = {}
        if request_ids:
            self.build_list_params(params, request_ids,
                                   'SpotInstanceRequestId')
        return self.get_list('CancelSpotInstanceRequests', params,
                             [('item', Instance)], verb='POST')

    def get_spot_datafeed_subscription(self):
        """
        Return the current spot instance data feed subscription
        associated with this account, if any.

        :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
        :return: The datafeed subscription object or None
        """
        return self.get_object('DescribeSpotDatafeedSubscription',
                               None, SpotDatafeedSubscription, verb='POST')

    def create_spot_datafeed_subscription(self, bucket, prefix):
        """
        Create a spot instance datafeed subscription for this account.

        :type bucket: str or unicode
        :param bucket: The name of the bucket where spot instance data
                       will be written.  The account issuing this request
                       must have FULL_CONTROL access to the bucket
                       specified in the request.

        :type prefix: str or unicode
        :param prefix: An optional prefix that will be pre-pended to all
                       data files written to the bucket.

        :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
        :return: The datafeed subscription object or None
        """
        params = {'Bucket': bucket}
        if prefix:
            params['Prefix'] = prefix
        return self.get_object('CreateSpotDatafeedSubscription', params,
                               SpotDatafeedSubscription, verb='POST')

    def delete_spot_datafeed_subscription(self):
        """
        Delete the current spot instance data feed subscription
        associated with this account

        :rtype: bool
        :return: True if successful
        """
        return self.get_status('DeleteSpotDatafeedSubscription',
                               None, verb='POST')

    # Zone methods

    def get_all_zones(self, zones=None, filters=None):
        """
        Get all Availability Zones associated with the current region.

        :type zones: list
        :param zones: Optional list of zones.  If this list is present,
                      only the Zones associated with these zone names
                      will be returned.

        :type filters: dict
        :param filters: Optional filters that can be used to limit
                        the results returned.  Filters are provided
                        in the form of a dictionary consisting of
                        filter names as the key and filter values
                        as the value.  The set of allowable filter
                        names/values is dependent on the request
                        being performed.  Check the EC2 API guide
                        for details.

        :rtype: list of :class:`boto.ec2.zone.Zone`
        :return: The requested Zone objects
        """
        params = {}
        if zones:
            self.build_list_params(params, zones, 'ZoneName')
        if filters:
            self.build_filter_params(params, filters)
        return self.get_list('DescribeAvailabilityZones', params,
                             [('item', Zone)], verb='POST')

    # Address methods

    def get_all_addresses(self, addresses=None, filters=None,
                          allocation_ids=None):
        """
        Get all EIPs associated with the current credentials.

        :type addresses: list
        :param addresses: Optional list of addresses.  If this list is
                          present, only the Addresses associated with
                          these addresses will be returned.

        :type filters: dict
        :param filters: Optional filters that can be used to limit
                        the results returned.  Filters are provided
                        in the form of a dictionary consisting of
                        filter names as the key and filter values
                        as the value.  The set of allowable filter
                        names/values is dependent on the request
                        being performed.  Check the EC2 API guide
                        for details.

        :type allocation_ids: list
        :param allocation_ids: Optional list of allocation IDs.  If this
                               list is present, only the Addresses
                               associated with the given allocation
                               IDs will be returned.
:rtype: list of :class:`boto.ec2.address.Address` :return: The requested Address objects """ params = {} if addresses: self.build_list_params(params, addresses, 'PublicIp') if allocation_ids: self.build_list_params(params, allocation_ids, 'AllocationId') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST') def allocate_address(self, domain=None): """ Allocate a new Elastic IP address and associate it with your account. :type domain: string :param domain: Optional string. If domain is set to "vpc" the address will be allocated to VPC . Will return address object with allocation_id. :rtype: :class:`boto.ec2.address.Address` :return: The newly allocated Address """ params = {} if domain is not None: params['Domain'] = domain return self.get_object('AllocateAddress', params, Address, verb='POST') def assign_private_ip_addresses(self, network_interface_id=None, private_ip_addresses=None, secondary_private_ip_address_count=None, allow_reassignment=False): """ Assigns one or more secondary private IP addresses to a network interface in Amazon VPC. :type network_interface_id: string :param network_interface_id: The network interface to which the IP address will be assigned. :type private_ip_addresses: list :param private_ip_addresses: Assigns the specified IP addresses as secondary IP addresses to the network interface. :type secondary_private_ip_address_count: int :param secondary_private_ip_address_count: The number of secondary IP addresses to assign to the network interface. You cannot specify this parameter when also specifying private_ip_addresses. :type allow_reassignment: bool :param allow_reassignment: Specifies whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface. :rtype: bool :return: True if successful """ params = {} if network_interface_id is not None: params['NetworkInterfaceId'] = network_interface_id if private_ip_addresses is not None: self.build_list_params(params, private_ip_addresses, 'PrivateIpAddress') elif secondary_private_ip_address_count is not None: params['SecondaryPrivateIpAddressCount'] = \ secondary_private_ip_address_count if allow_reassignment: params['AllowReassignment'] = 'true' return self.get_status('AssignPrivateIpAddresses', params, verb='POST') def associate_address(self, instance_id=None, public_ip=None, allocation_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False): """ Associate an Elastic IP address with a currently running instance. This requires one of ``public_ip`` or ``allocation_id`` depending on if you're associating a VPC address or a plain EC2 address. When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` as EC2 expects a single parameter and if ``public_ip`` is passed boto will preference that instead of ``allocation_id``. :type instance_id: string :param instance_id: The ID of the instance :type public_ip: string :param public_ip: The public IP address for EC2 based allocations. :type allocation_id: string :param allocation_id: The allocation ID for a VPC-based elastic IP. :type network_interface_id: string :param network_interface_id: The network interface ID to which elastic IP is to be assigned to :type private_ip_address: string :param private_ip_address: The primary or secondary private IP address to associate with the Elastic IP address. 
:type allow_reassociation: bool :param allow_reassociation: Specify this option to allow an Elastic IP address that is already associated with another network interface or instance to be re-associated with the specified instance or interface. :rtype: bool :return: True if successful """ params = {} if instance_id is not None: params['InstanceId'] = instance_id elif network_interface_id is not None: params['NetworkInterfaceId'] = network_interface_id if public_ip is not None: params['PublicIp'] = public_ip elif allocation_id is not None: params['AllocationId'] = allocation_id if private_ip_address is not None: params['PrivateIpAddress'] = private_ip_address if allow_reassociation: params['AllowReassociation'] = 'true' return self.get_status('AssociateAddress', params, verb='POST') def disassociate_address(self, public_ip=None, association_id=None): """ Disassociate an Elastic IP address from a currently running instance. :type public_ip: string :param public_ip: The public IP address for EC2 elastic IPs. :type association_id: string :param association_id: The association ID for a VPC based elastic ip. :rtype: bool :return: True if successful """ params = {} if public_ip is not None: params['PublicIp'] = public_ip elif association_id is not None: params['AssociationId'] = association_id return self.get_status('DisassociateAddress', params, verb='POST') def release_address(self, public_ip=None, allocation_id=None): """ Free up an Elastic IP address. Pass a public IP address to release an EC2 Elastic IP address and an AllocationId to release a VPC Elastic IP address. You should only pass one value. This requires one of ``public_ip`` or ``allocation_id`` depending on if you're associating a VPC address or a plain EC2 address. When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` as EC2 expects a single parameter and if ``public_ip`` is passed boto will preference that instead of ``allocation_id``. :type public_ip: string :param public_ip: The public IP address for EC2 elastic IPs. :type allocation_id: string :param allocation_id: The Allocation ID for VPC elastic IPs. :rtype: bool :return: True if successful """ params = {} if public_ip is not None: params['PublicIp'] = public_ip elif allocation_id is not None: params['AllocationId'] = allocation_id return self.get_status('ReleaseAddress', params, verb='POST') def unassign_private_ip_addresses(self, network_interface_id=None, private_ip_addresses=None): """ Unassigns one or more secondary private IP addresses from a network interface in Amazon VPC. :type network_interface_id: string :param network_interface_id: The network interface from which the secondary private IP address will be unassigned. :type private_ip_addresses: list :param private_ip_addresses: Specifies the secondary private IP addresses that you want to unassign from the network interface. :rtype: bool :return: True if successful """ params = {} if network_interface_id is not None: params['NetworkInterfaceId'] = network_interface_id if private_ip_addresses is not None: self.build_list_params(params, private_ip_addresses, 'PrivateIpAddress') return self.get_status('UnassignPrivateIpAddresses', params, verb='POST') # Volume methods def get_all_volumes(self, volume_ids=None, filters=None): """ Get all Volumes associated with the current credentials. :type volume_ids: list :param volume_ids: Optional list of volume ids. If this list is present, only the volumes associated with these volume ids will be returned. 
        :type filters: dict
        :param filters: Optional filters that can be used to limit
                        the results returned.  Filters are provided
                        in the form of a dictionary consisting of
                        filter names as the key and filter values
                        as the value.  The set of allowable filter
                        names/values is dependent on the request
                        being performed.  Check the EC2 API guide
                        for details.

        :rtype: list of :class:`boto.ec2.volume.Volume`
        :return: The requested Volume objects
        """
        params = {}
        if volume_ids:
            self.build_list_params(params, volume_ids, 'VolumeId')
        if filters:
            self.build_filter_params(params, filters)
        return self.get_list('DescribeVolumes', params,
                             [('item', Volume)], verb='POST')

    def get_all_volume_status(self, volume_ids=None,
                              max_results=None, next_token=None,
                              filters=None):
        """
        Retrieve the status of one or more volumes.

        :type volume_ids: list
        :param volume_ids: A list of strings of volume IDs

        :type max_results: int
        :param max_results: The maximum number of paginated instance
                            items per response.

        :type next_token: str
        :param next_token: A string specifying the next paginated set
                           of results to return.

        :type filters: dict
        :param filters: Optional filters that can be used to limit
                        the results returned.  Filters are provided
                        in the form of a dictionary consisting of
                        filter names as the key and filter values
                        as the value.  The set of allowable filter
                        names/values is dependent on the request
                        being performed.  Check the EC2 API guide
                        for details.

        :rtype: list
        :return: A list of volume status.
        """
        params = {}
        if volume_ids:
            self.build_list_params(params, volume_ids, 'VolumeId')
        if max_results:
            params['MaxResults'] = max_results
        if next_token:
            params['NextToken'] = next_token
        if filters:
            self.build_filter_params(params, filters)
        return self.get_object('DescribeVolumeStatus', params,
                               VolumeStatusSet, verb='POST')

    def enable_volume_io(self, volume_id):
        """
        Enables I/O operations for a volume that had I/O operations
        disabled because the data on the volume was potentially inconsistent.

        :type volume_id: str
        :param volume_id: The ID of the volume.

        :rtype: bool
        :return: True if successful
        """
        params = {'VolumeId': volume_id}
        return self.get_status('EnableVolumeIO', params, verb='POST')

    def get_volume_attribute(self, volume_id,
                             attribute='autoEnableIO'):
        """
        Describes an attribute of the volume.

        :type volume_id: str
        :param volume_id: The ID of the volume.

        :type attribute: str
        :param attribute: The requested attribute.  Valid values are:

                          * autoEnableIO

        :rtype: list of :class:`boto.ec2.volume.VolumeAttribute`
        :return: The requested Volume attribute
        """
        params = {'VolumeId': volume_id, 'Attribute': attribute}
        return self.get_object('DescribeVolumeAttribute', params,
                               VolumeAttribute, verb='POST')

    def modify_volume_attribute(self, volume_id, attribute, new_value):
        """
        Changes an attribute of a Volume.

        :type volume_id: string
        :param volume_id: The volume id you wish to change

        :type attribute: string
        :param attribute: The attribute you wish to change.  Valid values are:
                          AutoEnableIO.

        :type new_value: string
        :param new_value: The new value of the attribute.
        """
        params = {'VolumeId': volume_id}
        if attribute == 'AutoEnableIO':
            params['AutoEnableIO.Value'] = new_value
        return self.get_status('ModifyVolumeAttribute', params, verb='POST')

    def create_volume(self, size, zone, snapshot=None,
                      volume_type=None, iops=None):
        """
        Create a new EBS Volume.

        :type size: int
        :param size: The size of the new volume, in GiB

        :type zone: string or :class:`boto.ec2.zone.Zone`
        :param zone: The availability zone in which the Volume will be created.
        :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
        :param snapshot: The snapshot from which the new Volume will be created.

        :type volume_type: string
        :param volume_type: The type of the volume. (optional).
                            Valid values are: standard | io1.

        :type iops: int
        :param iops: The provisioned IOPs you want to associate with
                     this volume. (optional)
        """
        if isinstance(zone, Zone):
            zone = zone.name
        params = {'AvailabilityZone': zone}
        if size:
            params['Size'] = size
        if snapshot:
            if isinstance(snapshot, Snapshot):
                snapshot = snapshot.id
            params['SnapshotId'] = snapshot
        if volume_type:
            params['VolumeType'] = volume_type
        if iops:
            params['Iops'] = str(iops)
        return self.get_object('CreateVolume', params, Volume, verb='POST')

    def delete_volume(self, volume_id):
        """
        Delete an EBS volume.

        :type volume_id: str
        :param volume_id: The ID of the volume to be deleted.

        :rtype: bool
        :return: True if successful
        """
        params = {'VolumeId': volume_id}
        return self.get_status('DeleteVolume', params, verb='POST')

    def attach_volume(self, volume_id, instance_id, device):
        """
        Attach an EBS volume to an EC2 instance.

        :type volume_id: str
        :param volume_id: The ID of the EBS volume to be attached.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device: str
        :param device: The device on the instance through which the
                       volume will be exposed (e.g. /dev/sdh)

        :rtype: bool
        :return: True if successful
        """
        params = {'InstanceId': instance_id,
                  'VolumeId': volume_id,
                  'Device': device}
        return self.get_status('AttachVolume', params, verb='POST')

    def detach_volume(self, volume_id, instance_id=None,
                      device=None, force=False):
        """
        Detach an EBS volume from an EC2 instance.

        :type volume_id: str
        :param volume_id: The ID of the EBS volume to be detached.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance from which it will
                            be detached.

        :type device: str
        :param device: The device on the instance through which the
                       volume is exposed (e.g. /dev/sdh)

        :type force: bool
        :param force: Forces detachment if the previous detachment
                      attempt did not occur cleanly.  This option can lead
                      to data loss or a corrupted file system. Use this
                      option only as a last resort to detach a volume
                      from a failed instance. The instance will not have
                      an opportunity to flush file system caches nor
                      file system meta data. If you use this option, you
                      must perform file system check and repair procedures.

        :rtype: bool
        :return: True if successful
        """
        params = {'VolumeId': volume_id}
        if instance_id:
            params['InstanceId'] = instance_id
        if device:
            params['Device'] = device
        if force:
            params['Force'] = 'true'
        return self.get_status('DetachVolume', params, verb='POST')

    # Snapshot methods

    def get_all_snapshots(self, snapshot_ids=None,
                          owner=None, restorable_by=None,
                          filters=None):
        """
        Get all EBS Snapshots associated with the current credentials.

        :type snapshot_ids: list
        :param snapshot_ids: Optional list of snapshot ids.  If this list is
                             present, only the Snapshots associated with
                             these snapshot ids will be returned.

        :type owner: str
        :param owner: If present, only the snapshots owned by the specified
                      user will be returned.  Valid values are:

                      * self
                      * amazon
                      * AWS Account ID

        :type restorable_by: str
        :param restorable_by: If present, only the snapshots that are
                              restorable by the specified account id
                              will be returned.

        :type filters: dict
        :param filters: Optional filters that can be used to limit the
                        results returned.
Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list of :class:`boto.ec2.snapshot.Snapshot` :return: The requested Snapshot objects """ params = {} if snapshot_ids: self.build_list_params(params, snapshot_ids, 'SnapshotId') if owner: params['Owner'] = owner if restorable_by: params['RestorableBy'] = restorable_by if filters: self.build_filter_params(params, filters) return self.get_list('DescribeSnapshots', params, [('item', Snapshot)], verb='POST') def create_snapshot(self, volume_id, description=None): """ Create a snapshot of an existing EBS Volume. :type volume_id: str :param volume_id: The ID of the volume to be snapshot'ed :type description: str :param description: A description of the snapshot. Limited to 255 characters. :rtype: :class:`boto.ec2.snapshot.Snapshot` :return: The created Snapshot object """ params = {'VolumeId': volume_id} if description: params['Description'] = description[0:255] snapshot = self.get_object('CreateSnapshot', params, Snapshot, verb='POST') volume = self.get_all_volumes([volume_id])[0] volume_name = volume.tags.get('Name') if volume_name: snapshot.add_tag('Name', volume_name) return snapshot def delete_snapshot(self, snapshot_id): params = {'SnapshotId': snapshot_id} return self.get_status('DeleteSnapshot', params, verb='POST') def copy_snapshot(self, source_region, source_snapshot_id, description=None): """ Copies a point-in-time snapshot of an Amazon Elastic Block Store (Amazon EBS) volume and stores it in Amazon Simple Storage Service (Amazon S3). You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create new Amazon EBS volumes or Amazon Machine Images (AMIs). :type source_region: str :param source_region: The ID of the AWS region that contains the snapshot to be copied (e.g 'us-east-1', 'us-west-2', etc.). :type source_snapshot_id: str :param source_snapshot_id: The ID of the Amazon EBS snapshot to copy :type description: str :param description: A description of the new Amazon EBS snapshot. :rtype: str :return: The snapshot ID """ params = { 'SourceRegion': source_region, 'SourceSnapshotId': source_snapshot_id, } if description is not None: params['Description'] = description snapshot = self.get_object('CopySnapshot', params, Snapshot, verb='POST') return snapshot.id def trim_snapshots(self, hourly_backups=8, daily_backups=7, weekly_backups=4): """ Trim excess snapshots, based on when they were taken. More current snapshots are retained, with the number retained decreasing as you move back in time. If ebs volumes have a 'Name' tag with a value, their snapshots will be assigned the same tag when they are created. The values of the 'Name' tags for snapshots are used by this function to group snapshots taken from the same volume (or from a series of like-named volumes over time) for trimming. For every group of like-named snapshots, this function retains the newest and oldest snapshots, as well as, by default, the first snapshots taken in each of the last eight hours, the first snapshots taken in each of the last seven days, the first snapshots taken in the last 4 weeks (counting Midnight Sunday morning as the start of the week), and the first snapshot from the first Sunday of each month forever. :type hourly_backups: int :param hourly_backups: How many recent hourly backups should be saved. 
:type daily_backups: int :param daily_backups: How many recent daily backups should be saved. :type weekly_backups: int :param weekly_backups: How many recent weekly backups should be saved. """ # This function first builds up an ordered list of target times # that snapshots should be saved for (last 8 hours, last 7 days, etc.). # Then a map of snapshots is constructed, with the keys being # the snapshot / volume names and the values being arrays of # chronologically sorted snapshots. # Finally, for each array in the map, we go through the snapshot # array and the target time array in an interleaved fashion, # deleting snapshots whose start_times don't immediately follow a # target time (we delete a snapshot if there's another snapshot # that was made closer to the preceding target time). now = datetime.utcnow() last_hour = datetime(now.year, now.month, now.day, now.hour) last_midnight = datetime(now.year, now.month, now.day) last_sunday = datetime(now.year, now.month, now.day) - timedelta(days=(now.weekday() + 1) % 7) start_of_month = datetime(now.year, now.month, 1) target_backup_times = [] # there are no snapshots older than 1/1/2007 oldest_snapshot_date = datetime(2007, 1, 1) for hour in range(0, hourly_backups): target_backup_times.append(last_hour - timedelta(hours=hour)) for day in range(0, daily_backups): target_backup_times.append(last_midnight - timedelta(days=day)) for week in range(0, weekly_backups): target_backup_times.append(last_sunday - timedelta(weeks=week)) one_day = timedelta(days=1) while start_of_month > oldest_snapshot_date: # append the start of the month to the list of # snapshot dates to save: target_backup_times.append(start_of_month) # there's no timedelta setting for one month, so instead: # decrement the day by one, so we go to the final day of # the previous month... start_of_month -= one_day # ... and then go to the first day of that previous month: start_of_month = datetime(start_of_month.year, start_of_month.month, 1) temp = [] for t in target_backup_times: if t not in temp: temp.append(t) # sort to make the oldest dates first, and make sure the month start # and last four weeks' start are in the proper order target_backup_times = sorted(temp) # get all the snapshots, sort them by date and time, and # organize them into one array for each volume: all_snapshots = self.get_all_snapshots(owner='self') all_snapshots.sort(key=lambda snap: snap.start_time) snaps_for_each_volume = {} for snap in all_snapshots: # the snapshot name and the volume name are the same.
# The snapshot name is set from the volume # name at the time the snapshot is taken volume_name = snap.tags.get('Name') if volume_name: # only examine snapshots that have a volume name snaps_for_volume = snaps_for_each_volume.get(volume_name) if not snaps_for_volume: snaps_for_volume = [] snaps_for_each_volume[volume_name] = snaps_for_volume snaps_for_volume.append(snap) # Do a running comparison of snapshot dates to desired time # periods, keeping the oldest snapshot in each # time period and deleting the rest: for volume_name in snaps_for_each_volume: snaps = snaps_for_each_volume[volume_name] snaps = snaps[:-1] # never delete the newest snapshot time_period_number = 0 snap_found_for_this_time_period = False for snap in snaps: check_this_snap = True while check_this_snap and time_period_number < len(target_backup_times): snap_date = datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z') if snap_date < target_backup_times[time_period_number]: # the snap date is before the cutoff date. # Figure out if it's the first snap in this # date range and act accordingly (since both # the date ranges and the snapshots # are sorted chronologically, we know this # snapshot isn't in an earlier date range): if snap_found_for_this_time_period: if not snap.tags.get('preserve_snapshot'): # as long as the snapshot wasn't marked # with the 'preserve_snapshot' tag, delete it: try: self.delete_snapshot(snap.id) boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time)) except EC2ResponseError: boto.log.error('Attempt to trim snapshot %s (%s) failed. Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time)) # go on and look at the next snapshot, # leaving the time period alone else: # this was the first snapshot found for this # time period. Leave it alone and look at the # next snapshot: snap_found_for_this_time_period = True check_this_snap = False else: # the snap is after the cutoff date. Check it # against the next cutoff date time_period_number += 1 snap_found_for_this_time_period = False def get_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): """ Get information about an attribute of a snapshot. Only one attribute can be specified per call. :type snapshot_id: str :param snapshot_id: The ID of the snapshot. :type attribute: str :param attribute: The requested attribute. Valid values are: * createVolumePermission :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute` :return: The requested Snapshot attribute """ params = {'Attribute': attribute} if snapshot_id: params['SnapshotId'] = snapshot_id return self.get_object('DescribeSnapshotAttribute', params, SnapshotAttribute, verb='POST') def modify_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission', operation='add', user_ids=None, groups=None): """ Changes an attribute of a snapshot. :type snapshot_id: string :param snapshot_id: The snapshot id you wish to change :type attribute: string :param attribute: The attribute you wish to change. Valid values are: createVolumePermission :type operation: string :param operation: Either add or remove (this is required for changing snapshot permissions) :type user_ids: list :param user_ids: The Amazon IDs of users to add/remove attributes :type groups: list :param groups: The groups to add/remove attributes. The only valid value at this time is 'all'.
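For example, a minimal sketch of granting a second AWS account permission to create volumes from a snapshot, assuming ``conn`` is an EC2Connection; the snapshot ID and account ID are placeholders:: conn.modify_snapshot_attribute('snap-12345678', attribute='createVolumePermission', operation='add', user_ids=['111122223333'])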
""" params = {'SnapshotId': snapshot_id, 'Attribute': attribute, 'OperationType': operation} if user_ids: self.build_list_params(params, user_ids, 'UserId') if groups: self.build_list_params(params, groups, 'UserGroup') return self.get_status('ModifySnapshotAttribute', params, verb='POST') def reset_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): """ Resets an attribute of a snapshot to its default value. :type snapshot_id: string :param snapshot_id: ID of the snapshot :type attribute: string :param attribute: The attribute to reset :rtype: bool :return: Whether the operation succeeded or not """ params = {'SnapshotId': snapshot_id, 'Attribute': attribute} return self.get_status('ResetSnapshotAttribute', params, verb='POST') # Keypair methods def get_all_key_pairs(self, keynames=None, filters=None): """ Get all key pairs associated with your account. :type keynames: list :param keynames: A list of the names of keypairs to retrieve. If not provided, all key pairs will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.keypair.KeyPair` """ params = {} if keynames: self.build_list_params(params, keynames, 'KeyName') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)], verb='POST') def get_key_pair(self, keyname): """ Convenience method to retrieve a specific keypair (KeyPair). :type image_id: string :param image_id: the ID of the Image to retrieve :rtype: :class:`boto.ec2.keypair.KeyPair` :return: The KeyPair specified or None if it is not found """ try: return self.get_all_key_pairs(keynames=[keyname])[0] except self.ResponseError, e: if e.code == 'InvalidKeyPair.NotFound': return None else: raise def create_key_pair(self, key_name): """ Create a new key pair for your account. This will create the key pair within the region you are currently connected to. :type key_name: string :param key_name: The name of the new keypair :rtype: :class:`boto.ec2.keypair.KeyPair` :return: The newly created :class:`boto.ec2.keypair.KeyPair`. The material attribute of the new KeyPair object will contain the the unencrypted PEM encoded RSA private key. """ params = {'KeyName': key_name} return self.get_object('CreateKeyPair', params, KeyPair, verb='POST') def delete_key_pair(self, key_name): """ Delete a key pair from your account. :type key_name: string :param key_name: The name of the keypair to delete """ params = {'KeyName': key_name} return self.get_status('DeleteKeyPair', params, verb='POST') def import_key_pair(self, key_name, public_key_material): """ mports the public key from an RSA key pair that you created with a third-party tool. Supported formats: * OpenSSH public key format (e.g., the format in ~/.ssh/authorized_keys) * Base64 encoded DER format * SSH public key file format as specified in RFC4716 DSA keys are not supported. Make sure your key generator is set up to create RSA keys. Supported lengths: 1024, 2048, and 4096. :type key_name: string :param key_name: The name of the new keypair :type public_key_material: string :param public_key_material: The public key. You must base64 encode the public key material before sending it to AWS. 
:rtype: :class:`boto.ec2.keypair.KeyPair` :return: The newly created :class:`boto.ec2.keypair.KeyPair`. The material attribute of the new KeyPair object will contain the unencrypted PEM encoded RSA private key. """ public_key_material = base64.b64encode(public_key_material) params = {'KeyName': key_name, 'PublicKeyMaterial': public_key_material} return self.get_object('ImportKeyPair', params, KeyPair, verb='POST') # SecurityGroup methods def get_all_security_groups(self, groupnames=None, group_ids=None, filters=None): """ Get all security groups associated with your account in a region. :type groupnames: list :param groupnames: A list of the names of security groups to retrieve. If not provided, all security groups will be returned. :type group_ids: list :param group_ids: A list of IDs of security groups to retrieve for security groups within a VPC. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.securitygroup.SecurityGroup` """ params = {} if groupnames is not None: self.build_list_params(params, groupnames, 'GroupName') if group_ids is not None: self.build_list_params(params, group_ids, 'GroupId') if filters is not None: self.build_filter_params(params, filters) return self.get_list('DescribeSecurityGroups', params, [('item', SecurityGroup)], verb='POST') def create_security_group(self, name, description, vpc_id=None): """ Create a new security group for your account. This will create the security group within the region you are currently connected to. :type name: string :param name: The name of the new security group :type description: string :param description: The description of the new security group :type vpc_id: string :param vpc_id: The ID of the VPC to create the security group in, if any. :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`. """ params = {'GroupName': name, 'GroupDescription': description} if vpc_id is not None: params['VpcId'] = vpc_id group = self.get_object('CreateSecurityGroup', params, SecurityGroup, verb='POST') group.name = name group.description = description if vpc_id is not None: group.vpc_id = vpc_id return group def delete_security_group(self, name=None, group_id=None): """ Delete a security group from your account. :type name: string :param name: The name of the security group to delete. :type group_id: string :param group_id: The ID of the security group to delete within a VPC. :rtype: bool :return: True if successful. """ params = {} if name is not None: params['GroupName'] = name elif group_id is not None: params['GroupId'] = group_id return self.get_status('DeleteSecurityGroup', params, verb='POST') def authorize_security_group_deprecated(self, group_name, src_security_group_name=None, src_security_group_owner_id=None, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None): """ NOTE: This method uses the old-style request parameters that did not allow a port to be specified when authorizing a group. :type group_name: string :param group_name: The name of the security group you are adding the rule to.
:type src_security_group_name: string :param src_security_group_name: The name of the security group you are granting access to. :type src_security_group_owner_id: string :param src_security_group_owner_id: The ID of the owner of the security group you are granting access to. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are enabling :type to_port: int :param to_port: The ending port number you are enabling :type cidr_ip: string :param cidr_ip: The CIDR block you are providing access to. See http://goo.gl/Yj5QC :rtype: bool :return: True if successful. """ params = {'GroupName': group_name} if src_security_group_name: params['SourceSecurityGroupName'] = src_security_group_name if src_security_group_owner_id: params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id if ip_protocol: params['IpProtocol'] = ip_protocol if from_port: params['FromPort'] = from_port if to_port: params['ToPort'] = to_port if cidr_ip: params['CidrIp'] = cidr_ip return self.get_status('AuthorizeSecurityGroupIngress', params) def authorize_security_group(self, group_name=None, src_security_group_name=None, src_security_group_owner_id=None, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None, group_id=None, src_security_group_group_id=None): """ Add a new rule to an existing security group. You need to pass in either src_security_group_name and src_security_group_owner_id OR ip_protocol, from_port, to_port, and cidr_ip. In other words, either you are authorizing another group or you are authorizing some ip-based rule. :type group_name: string :param group_name: The name of the security group you are adding the rule to. :type src_security_group_name: string :param src_security_group_name: The name of the security group you are granting access to. :type src_security_group_owner_id: string :param src_security_group_owner_id: The ID of the owner of the security group you are granting access to. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are enabling :type to_port: int :param to_port: The ending port number you are enabling :type cidr_ip: string or list of strings :param cidr_ip: The CIDR block you are providing access to. See http://goo.gl/Yj5QC :type group_id: string :param group_id: ID of the EC2 or VPC security group to modify. This is required for VPC security groups and can be used instead of group_name for EC2 security groups. :type src_security_group_group_id: string :param src_security_group_group_id: The ID of the security group you are granting access to. Can be used instead of src_security_group_name :rtype: bool :return: True if successful.
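For example, a minimal sketch that opens TCP port 80 to the world, assuming ``conn`` is an EC2Connection; the group name is a placeholder:: conn.authorize_security_group(group_name='web-servers', ip_protocol='tcp', from_port=80, to_port=80, cidr_ip='0.0.0.0/0')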
""" if src_security_group_name: if from_port is None and to_port is None and ip_protocol is None: return self.authorize_security_group_deprecated( group_name, src_security_group_name, src_security_group_owner_id) params = {} if group_name: params['GroupName'] = group_name if group_id: params['GroupId'] = group_id if src_security_group_name: param_name = 'IpPermissions.1.Groups.1.GroupName' params[param_name] = src_security_group_name if src_security_group_owner_id: param_name = 'IpPermissions.1.Groups.1.UserId' params[param_name] = src_security_group_owner_id if src_security_group_group_id: param_name = 'IpPermissions.1.Groups.1.GroupId' params[param_name] = src_security_group_group_id if ip_protocol: params['IpPermissions.1.IpProtocol'] = ip_protocol if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if cidr_ip: if not isinstance(cidr_ip, list): cidr_ip = [cidr_ip] for i, single_cidr_ip in enumerate(cidr_ip): params['IpPermissions.1.IpRanges.%d.CidrIp' % (i+1)] = \ single_cidr_ip return self.get_status('AuthorizeSecurityGroupIngress', params, verb='POST') def authorize_security_group_egress(self, group_id, ip_protocol, from_port=None, to_port=None, src_group_id=None, cidr_ip=None): """ The action adds one or more egress rules to a VPC security group. Specifically, this action permits instances in a security group to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups in the same VPC. """ params = { 'GroupId': group_id, 'IpPermissions.1.IpProtocol': ip_protocol } if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if src_group_id is not None: params['IpPermissions.1.Groups.1.GroupId'] = src_group_id if cidr_ip is not None: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('AuthorizeSecurityGroupEgress', params, verb='POST') def revoke_security_group_deprecated(self, group_name, src_security_group_name=None, src_security_group_owner_id=None, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None): """ NOTE: This method uses the old-style request parameters that did not allow a port to be specified when authorizing a group. Remove an existing rule from an existing security group. You need to pass in either src_security_group_name and src_security_group_owner_id OR ip_protocol, from_port, to_port, and cidr_ip. In other words, either you are revoking another group or you are revoking some ip-based rule. :type group_name: string :param group_name: The name of the security group you are removing the rule from. :type src_security_group_name: string :param src_security_group_name: The name of the security group you are revoking access to. :type src_security_group_owner_id: string :param src_security_group_owner_id: The ID of the owner of the security group you are revoking access to. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are disabling :type to_port: int :param to_port: The ending port number you are disabling :type to_port: string :param to_port: The CIDR block you are revoking access to. http://goo.gl/Yj5QC :rtype: bool :return: True if successful. 
""" params = {'GroupName':group_name} if src_security_group_name: params['SourceSecurityGroupName'] = src_security_group_name if src_security_group_owner_id: params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id if ip_protocol: params['IpProtocol'] = ip_protocol if from_port: params['FromPort'] = from_port if to_port: params['ToPort'] = to_port if cidr_ip: params['CidrIp'] = cidr_ip return self.get_status('RevokeSecurityGroupIngress', params) def revoke_security_group(self, group_name=None, src_security_group_name=None, src_security_group_owner_id=None, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None, group_id=None, src_security_group_group_id=None): """ Remove an existing rule from an existing security group. You need to pass in either src_security_group_name and src_security_group_owner_id OR ip_protocol, from_port, to_port, and cidr_ip. In other words, either you are revoking another group or you are revoking some ip-based rule. :type group_name: string :param group_name: The name of the security group you are removing the rule from. :type src_security_group_name: string :param src_security_group_name: The name of the security group you are revoking access to. :type src_security_group_owner_id: string :param src_security_group_owner_id: The ID of the owner of the security group you are revoking access to. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are disabling :type to_port: int :param to_port: The ending port number you are disabling :type cidr_ip: string :param cidr_ip: The CIDR block you are revoking access to. See http://goo.gl/Yj5QC :type group_id: string :param group_id: ID of the EC2 or VPC security group to modify. This is required for VPC security groups and can be used instead of group_name for EC2 security groups. :type src_security_group_group_id: string :param src_security_group_group_id: The ID of the security group for which you are revoking access. Can be used instead of src_security_group_name :rtype: bool :return: True if successful. """ if src_security_group_name: if from_port is None and to_port is None and ip_protocol is None: return self.revoke_security_group_deprecated( group_name, src_security_group_name, src_security_group_owner_id) params = {} if group_name is not None: params['GroupName'] = group_name if group_id is not None: params['GroupId'] = group_id if src_security_group_name: param_name = 'IpPermissions.1.Groups.1.GroupName' params[param_name] = src_security_group_name if src_security_group_group_id: param_name = 'IpPermissions.1.Groups.1.GroupId' params[param_name] = src_security_group_group_id if src_security_group_owner_id: param_name = 'IpPermissions.1.Groups.1.UserId' params[param_name] = src_security_group_owner_id if ip_protocol: params['IpPermissions.1.IpProtocol'] = ip_protocol if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if cidr_ip: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('RevokeSecurityGroupIngress', params, verb='POST') def revoke_security_group_egress(self, group_id, ip_protocol, from_port=None, to_port=None, src_group_id=None, cidr_ip=None): """ Remove an existing egress rule from an existing VPC security group. You need to pass in an ip_protocol, from_port and to_port range only if the protocol you are using is port-based. You also need to pass in either a src_group_id or cidr_ip. 
:type group_id: string :param group_id: The ID of the security group you are removing the rule from. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp | -1 :type from_port: int :param from_port: The beginning port number you are disabling :type to_port: int :param to_port: The ending port number you are disabling :type src_group_id: string :param src_group_id: The source security group you are revoking access to. :type cidr_ip: string :param cidr_ip: The CIDR block you are revoking access to. See http://goo.gl/Yj5QC :rtype: bool :return: True if successful. """ params = {} if group_id: params['GroupId'] = group_id if ip_protocol: params['IpPermissions.1.IpProtocol'] = ip_protocol if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if src_group_id is not None: params['IpPermissions.1.Groups.1.GroupId'] = src_group_id if cidr_ip: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('RevokeSecurityGroupEgress', params, verb='POST') # # Regions # def get_all_regions(self, region_names=None, filters=None): """ Get all available regions for the EC2 service. :type region_names: list of str :param region_names: Names of regions to limit output :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """ params = {} if region_names: self.build_list_params(params, region_names, 'RegionName') if filters: self.build_filter_params(params, filters) regions = self.get_list('DescribeRegions', params, [('item', RegionInfo)], verb='POST') for region in regions: region.connection_cls = EC2Connection return regions # # Reservation methods # def get_all_reserved_instances_offerings(self, reserved_instances_offering_ids=None, instance_type=None, availability_zone=None, product_description=None, filters=None, instance_tenancy=None, offering_type=None, include_marketplace=None, min_duration=None, max_duration=None, max_instance_count=None, next_token=None, max_results=None): """ Describes Reserved Instance offerings that are available for purchase. :type reserved_instances_offering_ids: list :param reserved_instances_offering_ids: One or more Reserved Instances offering IDs. :type instance_type: str :param instance_type: Displays Reserved Instances of the specified instance type. :type availability_zone: str :param availability_zone: Displays Reserved Instances within the specified Availability Zone. :type product_description: str :param product_description: Displays Reserved Instances with the specified product description. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :type instance_tenancy: string :param instance_tenancy: The tenancy of the Reserved Instance offering. A Reserved Instance with tenancy of dedicated will run on single-tenant hardware and can only be launched within a VPC.
:type offering_type: string :param offering_type: The Reserved Instance offering type. Valid values: * Heavy Utilization * Medium Utilization * Light Utilization :type include_marketplace: bool :param include_marketplace: Include Marketplace offerings in the response. :type min_duration: int :param min_duration: Minimum duration (in seconds) to filter when searching for offerings. :type max_duration: int :param max_duration: Maximum duration (in seconds) to filter when searching for offerings. :type max_instance_count: int :param max_instance_count: Maximum number of instances to filter when searching for offerings. :type next_token: string :param next_token: Token to use when requesting the next paginated set of offerings. :type max_results: int :param max_results: Maximum number of offerings to return per call. :rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`. """ params = {} if reserved_instances_offering_ids is not None: self.build_list_params(params, reserved_instances_offering_ids, 'ReservedInstancesOfferingId') if instance_type: params['InstanceType'] = instance_type if availability_zone: params['AvailabilityZone'] = availability_zone if product_description: params['ProductDescription'] = product_description if filters: self.build_filter_params(params, filters) if instance_tenancy is not None: params['InstanceTenancy'] = instance_tenancy if offering_type is not None: params['OfferingType'] = offering_type if include_marketplace is not None: if include_marketplace: params['IncludeMarketplace'] = 'true' else: params['IncludeMarketplace'] = 'false' if min_duration is not None: params['MinDuration'] = str(min_duration) if max_duration is not None: params['MaxDuration'] = str(max_duration) if max_instance_count is not None: params['MaxInstanceCount'] = str(max_instance_count) if next_token is not None: params['NextToken'] = next_token if max_results is not None: params['MaxResults'] = str(max_results) return self.get_list('DescribeReservedInstancesOfferings', params, [('item', ReservedInstancesOffering)], verb='POST') def get_all_reserved_instances(self, reserved_instances_id=None, filters=None): """ Describes one or more of the Reserved Instances that you purchased. :type reserved_instances_id: list :param reserved_instances_id: A list of the reserved instance ids that will be returned. If not provided, all reserved instances will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance` """ params = {} if reserved_instances_id: self.build_list_params(params, reserved_instances_id, 'ReservedInstancesId') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeReservedInstances', params, [('item', ReservedInstance)], verb='POST') def purchase_reserved_instance_offering(self, reserved_instances_offering_id, instance_count=1, limit_price=None): """ Purchase a Reserved Instance for use with your account. ** CAUTION ** This request can result in large amounts of money being charged to your AWS account. Use with caution!
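For example, a minimal sketch that caps the total order at 100 USD, assuming ``conn`` is an EC2Connection; the offering ID is a placeholder:: conn.purchase_reserved_instance_offering('4b2293b4-xxxx', instance_count=1, limit_price=(100.0, 'USD'))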
:type reserved_instances_offering_id: string :param reserved_instances_offering_id: The offering ID of the Reserved Instance to purchase :type instance_count: int :param instance_count: The number of Reserved Instances to purchase. Default value is 1. :type limit_price: tuple :param limit_price: Limit the price on the total order. Must be a tuple of (amount, currency_code), for example: (100.0, 'USD'). :rtype: :class:`boto.ec2.reservedinstance.ReservedInstance` :return: The newly created Reserved Instance """ params = { 'ReservedInstancesOfferingId': reserved_instances_offering_id, 'InstanceCount': instance_count} if limit_price is not None: params['LimitPrice.Amount'] = str(limit_price[0]) params['LimitPrice.CurrencyCode'] = str(limit_price[1]) return self.get_object('PurchaseReservedInstancesOffering', params, ReservedInstance, verb='POST') def create_reserved_instances_listing(self, reserved_instances_id, instance_count, price_schedules, client_token): """Creates a new listing for Reserved Instances. Creates a new listing for Amazon EC2 Reserved Instances that will be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. If you want to sell your Reserved Instances, you must first register as a Seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price you want to receive for them. Your Reserved Instance listings then become available for purchase. :type reserved_instances_id: string :param reserved_instances_id: The ID of the Reserved Instance that will be listed. :type instance_count: int :param instance_count: The number of instances that are a part of a Reserved Instance account that will be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call. :type price_schedules: List of tuples :param price_schedules: A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term. Each tuple contains two elements, the price and the term. For example, for an instance that has 11 months remaining in its term, we can have a price schedule with an upfront price of $2.50. At 8 months remaining we can drop the price down to $2.00. This would be expressed as:: price_schedules=[('2.50', 11), ('2.00', 8)] :type client_token: string :param client_token: Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters.
:rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstanceListing` """ params = { 'ReservedInstancesId': reserved_instances_id, 'InstanceCount': str(instance_count), 'ClientToken': client_token, } for i, schedule in enumerate(price_schedules): price, term = schedule params['PriceSchedules.%s.Price' % i] = str(price) params['PriceSchedules.%s.Term' % i] = str(term) return self.get_list('CreateReservedInstancesListing', params, [('item', ReservedInstanceListing)], verb='POST') def cancel_reserved_instances_listing( self, reserved_instances_listing_ids=None): """Cancels the specified Reserved Instance listing. :type reserved_instances_listing_ids: List of strings :param reserved_instances_listing_ids: The IDs of the Reserved Instance listings to be cancelled. :rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstanceListing` """ params = {} if reserved_instances_listing_ids is not None: self.build_list_params(params, reserved_instances_listing_ids, 'ReservedInstancesListingId') return self.get_list('CancelReservedInstancesListing', params, [('item', ReservedInstanceListing)], verb='POST') # # Monitoring # def monitor_instances(self, instance_ids): """ Enable CloudWatch monitoring for the supplied instances. :type instance_ids: list of strings :param instance_ids: The instance ids :rtype: list :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` """ params = {} self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('MonitorInstances', params, [('item', InstanceInfo)], verb='POST') def monitor_instance(self, instance_id): """ Deprecated Version, maintained for backward compatibility. Enable CloudWatch monitoring for the supplied instance. :type instance_id: string :param instance_id: The instance id :rtype: list :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` """ return self.monitor_instances([instance_id]) def unmonitor_instances(self, instance_ids): """ Disable CloudWatch monitoring for the supplied instances. :type instance_ids: list of strings :param instance_ids: The instance ids :rtype: list :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` """ params = {} self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('UnmonitorInstances', params, [('item', InstanceInfo)], verb='POST') def unmonitor_instance(self, instance_id): """ Deprecated Version, maintained for backward compatibility. Disable CloudWatch monitoring for the supplied instance. :type instance_id: string :param instance_id: The instance id :rtype: list :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` """ return self.unmonitor_instances([instance_id]) # # Bundle Windows Instances # def bundle_instance(self, instance_id, s3_bucket, s3_prefix, s3_upload_policy): """ Bundle a Windows instance. :type instance_id: string :param instance_id: The instance id :type s3_bucket: string :param s3_bucket: The bucket in which the AMI should be stored. :type s3_prefix: string :param s3_prefix: The beginning of the file name for the AMI. :type s3_upload_policy: string :param s3_upload_policy: Base64 encoded policy that specifies condition and permissions for Amazon EC2 to upload the user's image into Amazon S3.
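For example, a minimal sketch assuming ``conn`` is an EC2Connection; the instance ID, bucket, prefix, and the base64-encoded policy document ``policy_b64`` are all placeholders (constructing a valid upload policy is covered in the EC2 documentation):: task = conn.bundle_instance('i-12345678', 'my-bundle-bucket', 'win-ami', policy_b64)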
""" params = {'InstanceId': instance_id, 'Storage.S3.Bucket': s3_bucket, 'Storage.S3.Prefix': s3_prefix, 'Storage.S3.UploadPolicy': s3_upload_policy} s3auth = boto.auth.get_auth_handler(None, boto.config, self.provider, ['s3']) params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id signature = s3auth.sign_string(s3_upload_policy) params['Storage.S3.UploadPolicySignature'] = signature return self.get_object('BundleInstance', params, BundleInstanceTask, verb='POST') def get_all_bundle_tasks(self, bundle_ids=None, filters=None): """ Retrieve current bundling tasks. If no bundle id is specified, all tasks are retrieved. :type bundle_ids: list :param bundle_ids: A list of strings containing identifiers for previously created bundling tasks. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. """ params = {} if bundle_ids: self.build_list_params(params, bundle_ids, 'BundleId') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeBundleTasks', params, [('item', BundleInstanceTask)], verb='POST') def cancel_bundle_task(self, bundle_id): """ Cancel a previously submitted bundle task :type bundle_id: string :param bundle_id: The identifier of the bundle task to cancel. """ params = {'BundleId': bundle_id} return self.get_object('CancelBundleTask', params, BundleInstanceTask, verb='POST') def get_password_data(self, instance_id): """ Get encrypted administrator password for a Windows instance. :type instance_id: string :param instance_id: The identifier of the instance to retrieve the password for. """ params = {'InstanceId': instance_id} rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST') return rs.passwordData # # Cluster Placement Groups # def get_all_placement_groups(self, groupnames=None, filters=None): """ Get all placement groups associated with your account in a region. :type groupnames: list :param groupnames: A list of the names of placement groups to retrieve. If not provided, all placement groups will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.placementgroup.PlacementGroup` """ params = {} if groupnames: self.build_list_params(params, groupnames, 'GroupName') if filters: self.build_filter_params(params, filters) return self.get_list('DescribePlacementGroups', params, [('item', PlacementGroup)], verb='POST') def create_placement_group(self, name, strategy='cluster'): """ Create a new placement group for your account. This will create the placement group within the region you are currently connected to. :type name: string :param name: The name of the new placement group :type strategy: string :param strategy: The placement strategy of the new placement group. Currently, the only acceptable value is "cluster". 
:rtype: bool :return: True if successful """ params = {'GroupName': name, 'Strategy': strategy} return self.get_status('CreatePlacementGroup', params, verb='POST') def delete_placement_group(self, name): """ Delete a placement group from your account. :type name: string :param name: The name of the placement group to delete """ params = {'GroupName': name} return self.get_status('DeletePlacementGroup', params, verb='POST') # Tag methods def build_tag_param_list(self, params, tags): keys = sorted(tags.keys()) i = 1 for key in keys: value = tags[key] params['Tag.%d.Key' % i] = key if value is not None: params['Tag.%d.Value' % i] = value i += 1 def get_all_tags(self, filters=None): """ Retrieve all the metadata tags associated with your account. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.tag.Tag` objects """ params = {} if filters: self.build_filter_params(params, filters) return self.get_list('DescribeTags', params, [('item', Tag)], verb='POST') def create_tags(self, resource_ids, tags): """ Create new metadata tags for the specified resource ids. :type resource_ids: list :param resource_ids: List of strings :type tags: dict :param tags: A dictionary containing the name/value pairs. If you want to create only a tag name, the value for that tag should be the empty string (e.g. ''). """ params = {} self.build_list_params(params, resource_ids, 'ResourceId') self.build_tag_param_list(params, tags) return self.get_status('CreateTags', params, verb='POST') def delete_tags(self, resource_ids, tags): """ Delete metadata tags for the specified resource ids. :type resource_ids: list :param resource_ids: List of strings :type tags: dict or list :param tags: Either a dictionary containing name/value pairs or a list containing just tag names. If you pass in a dictionary, the values must match the actual tag values or the tag will not be deleted. If you pass in a value of None for the tag value, all tags with that name will be deleted. """ if isinstance(tags, list): tags = dict.fromkeys(tags, None) params = {} self.build_list_params(params, resource_ids, 'ResourceId') self.build_tag_param_list(params, tags) return self.get_status('DeleteTags', params, verb='POST') # Network Interface methods def get_all_network_interfaces(self, filters=None): """ Retrieve all of the Elastic Network Interfaces (ENIs) associated with your account. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface` """ params = {} if filters: self.build_filter_params(params, filters) return self.get_list('DescribeNetworkInterfaces', params, [('item', NetworkInterface)], verb='POST') def create_network_interface(self, subnet_id, private_ip_address=None, description=None, groups=None): """ Creates a network interface in the specified subnet.
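For example, a minimal sketch assuming ``conn`` is an EC2Connection; the subnet and security group IDs are placeholders:: eni = conn.create_network_interface('subnet-12345678', description='web ENI', groups=['sg-12345678'])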
:type subnet_id: str :param subnet_id: The ID of the subnet to associate with the network interface. :type private_ip_address: str :param private_ip_address: The private IP address of the network interface. If not supplied, one will be chosen for you. :type description: str :param description: The description of the network interface. :type groups: list :param groups: Lists the groups for use by the network interface. This can be either a list of group IDs or a list of :class:`boto.ec2.securitygroup.SecurityGroup` objects. :rtype: :class:`boto.ec2.networkinterface.NetworkInterface` :return: The newly created network interface. """ params = {'SubnetId': subnet_id} if private_ip_address: params['PrivateIpAddress'] = private_ip_address if description: params['Description'] = description if groups: ids = [] for group in groups: if isinstance(group, SecurityGroup): ids.append(group.id) else: ids.append(group) self.build_list_params(params, ids, 'SecurityGroupId') return self.get_object('CreateNetworkInterface', params, NetworkInterface, verb='POST') def attach_network_interface(self, network_interface_id, instance_id, device_index): """ Attaches a network interface to an instance. :type network_interface_id: str :param network_interface_id: The ID of the network interface to attach. :type instance_id: str :param instance_id: The ID of the instance to which the network interface will be attached. :type device_index: int :param device_index: The index of the device for the network interface attachment on the instance. """ params = {'NetworkInterfaceId': network_interface_id, 'InstanceId': instance_id, 'DeviceIndex': device_index} return self.get_status('AttachNetworkInterface', params, verb='POST') def detach_network_interface(self, attachment_id, force=False): """ Detaches a network interface from an instance. :type attachment_id: str :param attachment_id: The ID of the attachment. :type force: bool :param force: Set to true to force a detachment. """ params = {'AttachmentId': attachment_id} if force: params['Force'] = 'true' return self.get_status('DetachNetworkInterface', params, verb='POST') def delete_network_interface(self, network_interface_id): """ Delete the specified network interface. :type network_interface_id: str :param network_interface_id: The ID of the network interface to delete. """ params = {'NetworkInterfaceId': network_interface_id} return self.get_status('DeleteNetworkInterface', params, verb='POST') def get_all_vmtypes(self): """ Get all vmtypes available on this cloud (eucalyptus specific) :rtype: list of :class:`boto.ec2.vmtype.VmType` :return: The requested VmType objects """ params = {} return self.get_list('DescribeVmTypes', params, [('euca:item', VmType)], verb='POST')
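# A minimal end-to-end usage sketch, not part of the API above: it assumes a connection obtained via boto.ec2.connect_to_region and placeholder IDs throughout. # conn = boto.ec2.connect_to_region('us-east-1') # vol = conn.create_volume(10, 'us-east-1a') # 10 GiB volume # conn.attach_volume(vol.id, 'i-12345678', '/dev/sdh') # snap = conn.create_snapshot(vol.id, 'nightly backup') # conn.trim_snapshots() # prune snapshots per the retention schedule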
1
8,934
PEP-8/consistency with the rest of the code.
boto-boto
py
@@ -0,0 +1,15 @@ +using Datadog.Trace.ClrProfiler.Integrations; + +namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.MongoDb +{ + internal abstract class MongoDbInstrumentMethodAttribute : InstrumentMethodAttribute + { + protected MongoDbInstrumentMethodAttribute(string typeName) + { + AssemblyName = MongoDbIntegration.MongoDbClientAssembly; + TypeName = typeName; + IntegrationName = MongoDbIntegration.IntegrationName; + ParameterTypeNames = new[] { "MongoDB.Driver.Core.Connections.IConnection", ClrNames.CancellationToken }; + } + } +}
1
1
19,136
nit: Would you mind moving the `ParameterTypeNames` assignment into each of the method-specific attributes? That could reduce confusion if we later decided to instrument other methods in MongoDb
DataDog-dd-trace-dotnet
.cs
@@ -0,0 +1,16 @@ +using System.Collections.Generic; +using Nethermind.Blockchain.Find; +using Nethermind.Core; +using Nethermind.Core.Specs; +using Nethermind.Int256; + +namespace Nethermind.JsonRpc.Modules.Eth +{ + public interface IGasPriceOracle + { + public ISpecProvider SpecProvider { get; } + public UInt256? FallbackGasPrice { get; } + public List<UInt256> TxGasPriceList { get; } + ResultWrapper<UInt256?> GasPriceEstimate(Block? headBlock, IBlockFinder blockFinder); + } +}
1
1
25,649
Is exposing SpecProvider needed here?
NethermindEth-nethermind
.cs
@@ -141,6 +141,18 @@ namespace MvvmCross.ViewModels } } + [NotifyPropertyChangedInvocator] + protected virtual bool SetProperty<T>(ref T storage, T value, Action afterAction, [CallerMemberName] string propertyName = null) + { + if (!SetProperty(ref storage, value, propertyName)) + { + return false; + } + + afterAction?.Invoke(); + return true; + } + [NotifyPropertyChangedInvocator] protected virtual bool SetProperty<T>(ref T storage, T value, [CallerMemberName] string propertyName = null) {
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Collections.Generic; using System.ComponentModel; using System.Linq.Expressions; using System.Runtime.CompilerServices; using System.Threading.Tasks; using MvvmCross.Annotations; using MvvmCross.Base; using MvvmCross.Logging; namespace MvvmCross.ViewModels { public abstract class MvxNotifyPropertyChanged : MvxMainThreadDispatchingObject , IMvxNotifyPropertyChanged { private static readonly PropertyChangedEventArgs AllPropertiesChanged = new PropertyChangedEventArgs(string.Empty); public event PropertyChangedEventHandler PropertyChanged; public event PropertyChangingEventHandler PropertyChanging; private bool _shouldAlwaysRaiseInpcOnUserInterfaceThread; private bool _shouldRaisePropertyChanging; private bool _shouldLogInpc; public bool ShouldAlwaysRaiseInpcOnUserInterfaceThread() { return _shouldAlwaysRaiseInpcOnUserInterfaceThread; } public void ShouldAlwaysRaiseInpcOnUserInterfaceThread(bool value) { _shouldAlwaysRaiseInpcOnUserInterfaceThread = value; } public bool ShouldRaisePropertyChanging() { return _shouldRaisePropertyChanging; } public void ShouldRaisePropertyChanging(bool value) { _shouldRaisePropertyChanging = value; } public bool ShouldLogInpc() { return _shouldLogInpc; } public void ShouldLogInpc(bool value) { _shouldLogInpc = value; } protected MvxNotifyPropertyChanged() { var alwaysOnUIThread = MvxSingletonCache.Instance == null || MvxSingletonCache.Instance.Settings.AlwaysRaiseInpcOnUserInterfaceThread; ShouldAlwaysRaiseInpcOnUserInterfaceThread(alwaysOnUIThread); var raisePropertyChanging = MvxSingletonCache.Instance == null || MvxSingletonCache.Instance.Settings.ShouldRaisePropertyChanging; ShouldRaisePropertyChanging(raisePropertyChanging); var shouldLogInpc = MvxSingletonCache.Instance != null && MvxSingletonCache.Instance.Settings.ShouldLogInpc; ShouldLogInpc(shouldLogInpc); } public bool RaisePropertyChanging<T>(T newValue, Expression<Func<T>> property) { var name = this.GetPropertyNameFromExpression(property); return RaisePropertyChanging(newValue, name); } public bool RaisePropertyChanging<T>(T newValue, [CallerMemberName] string whichProperty = "") { var changedArgs = new MvxPropertyChangingEventArgs<T>(whichProperty, newValue); return RaisePropertyChanging(changedArgs); } public virtual bool RaisePropertyChanging<T>(MvxPropertyChangingEventArgs<T> changingArgs) { // check for interception before broadcasting change if (InterceptRaisePropertyChanging(changingArgs) == MvxInpcInterceptionResult.DoNotRaisePropertyChanging) return !changingArgs.Cancel; if (ShouldLogInpc()) MvxLog.Instance.Trace($"Property '{changingArgs.PropertyName}' changing value to {changingArgs.NewValue.ToString()}"); PropertyChanging?.Invoke(this, changingArgs); return !changingArgs.Cancel; } [NotifyPropertyChangedInvocator] public Task RaisePropertyChanged<T>(Expression<Func<T>> property) { var name = this.GetPropertyNameFromExpression(property); return RaisePropertyChanged(name); } [NotifyPropertyChangedInvocator] public virtual Task RaisePropertyChanged([CallerMemberName] string whichProperty = "") { var changedArgs = new PropertyChangedEventArgs(whichProperty); return RaisePropertyChanged(changedArgs); } public virtual Task RaiseAllPropertiesChanged() { return RaisePropertyChanged(AllPropertiesChanged); } public virtual async Task 
RaisePropertyChanged(PropertyChangedEventArgs changedArgs) { // check for interception before broadcasting change if (InterceptRaisePropertyChanged(changedArgs) == MvxInpcInterceptionResult.DoNotRaisePropertyChanged) return; void raiseChange() { if (ShouldLogInpc()) MvxLog.Instance.Trace($"Property '{changedArgs.PropertyName}' value changed"); PropertyChanged?.Invoke(this, changedArgs); } void exceptionMasked() => MvxMainThreadDispatcher.ExceptionMaskedAction(raiseChange, true); if (ShouldAlwaysRaiseInpcOnUserInterfaceThread()) { // check for subscription before potentially causing a cross-threaded call if (PropertyChanged == null) return; await InvokeOnMainThreadAsync(exceptionMasked); } else { exceptionMasked(); } } [NotifyPropertyChangedInvocator] protected virtual bool SetProperty<T>(ref T storage, T value, [CallerMemberName] string propertyName = null) { if (EqualityComparer<T>.Default.Equals(storage, value)) { return false; } if (ShouldRaisePropertyChanging()) { var shouldSetValue = RaisePropertyChanging(value, propertyName); if (!shouldSetValue) return false; } storage = value; RaisePropertyChanged(propertyName); return true; } protected virtual MvxInpcInterceptionResult InterceptRaisePropertyChanged(PropertyChangedEventArgs changedArgs) { if (MvxSingletonCache.Instance != null) { var interceptor = MvxSingletonCache.Instance.InpcInterceptor; if (interceptor != null) { return interceptor.Intercept(this, changedArgs); } } return MvxInpcInterceptionResult.NotIntercepted; } protected virtual MvxInpcInterceptionResult InterceptRaisePropertyChanging(PropertyChangingEventArgs changingArgs) { if (MvxSingletonCache.Instance != null) { var interceptor = MvxSingletonCache.Instance.InpcInterceptor; if (interceptor != null) { return interceptor.Intercept(this, changingArgs); } } return MvxInpcInterceptionResult.NotIntercepted; } } }
1
14,789
So this will only be executed when it is true. I'm not sure that makes it always usable. Why not make the afterAction an `Action<bool>` and always call it, with the result as a parameter.
MvvmCross-MvvmCross
.cs
@@ -516,6 +516,7 @@ int pfs_table::resolve_name(int is_special_syscall, const char *cname, struct pf char tmp[PFS_PATH_MAX]; path_split(pname->path,pname->service_name,tmp); pname->service = pfs_service_lookup(pname->service_name); + if (result == PFS_RESOLVE_LOCAL) pname->service = NULL; if(!pname->service) { pname->service = pfs_service_lookup_default(); strcpy(pname->service_name,"local");
1
/* Copyright (C) 2003-2004 Douglas Thain and the University of Wisconsin Copyright (C) 2005- The University of Notre Dame This software is distributed under the GNU General Public License. See the file COPYING for details. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "pfs_search.h" #include "pfs_table.h" #include "pfs_service.h" #include "pfs_pointer.h" #include "pfs_file.h" #include "pfs_mmap.h" #include "pfs_process.h" #include "pfs_file_cache.h" #include "pfs_resolve.h" extern "C" { #include "pfs_channel.h" #include "buffer.h" #include "debug.h" #include "full_io.h" #include "get_canonical_path.h" #include "hash_table.h" #include "macros.h" #include "md5.h" #include "memfdexe.h" #include "path.h" #include "pattern.h" #include "random.h" #include "stringtools.h" } #include <dirent.h> #include <fcntl.h> #include <fnmatch.h> #include <unistd.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/time.h> #ifndef major /* glibc 2.26 drops sys/sysmacros.h from sys/types.h, thus we add it here */ #include <sys/sysmacros.h> #endif #include <assert.h> #include <ctype.h> #include <errno.h> #include <math.h> #include <stdarg.h> #include <stddef.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #ifndef O_CLOEXEC # define O_CLOEXEC 02000000 #endif #define E_OK 10000 extern int pfs_force_stream; extern int pfs_force_sync; extern int pfs_follow_symlinks; extern int pfs_enable_small_file_optimizations; extern int pfs_no_flock; extern const char * pfs_initial_working_directory; static const int _SENTINEL1 = 0; #define NATIVE ((pfs_pointer *)&_SENTINEL1) static const int _SENTINEL2 = 0; #define SPECIAL ((pfs_pointer *)&_SENTINEL2) #define SPECIAL_POINTER(pointer) (pointer == SPECIAL) #define NATIVE_POINTER(pointer) (pointer == NATIVE) #define PARROT_POINTER(pointer) (!(SPECIAL_POINTER(pointer) || NATIVE_POINTER(pointer) || pointer == NULL)) #define VALID_FD(fd) (0 <= fd && fd < pointer_count) #define PARROT_FD(fd) (VALID_FD(fd) && PARROT_POINTER(pointers[fd])) #define CHECK_FD(fd) \ do {\ if (!PARROT_FD(fd))\ return (errno = EBADF, -1);\ } while (0) pfs_table::pfs_table() { int i; if(pfs_initial_working_directory) { strcpy(working_dir,pfs_initial_working_directory); } else { ::getcwd(working_dir,sizeof(working_dir)); } pointer_count = sysconf(_SC_OPEN_MAX); pointers = new pfs_pointer* [pointer_count]; fd_flags = new int[pointer_count]; mmap_list = 0; for( i=0; i<pointer_count; i++ ) { pointers[i] = 0; fd_flags[i] = 0; } } pfs_table::~pfs_table() { for(int i=0;i<pointer_count;i++) { this->close(i); } while(mmap_list) { pfs_mmap *m = mmap_list; mmap_list = m->next; delete m; } delete [] pointers; delete [] fd_flags; } pfs_table * pfs_table::fork() { pfs_table *table = new pfs_table; for(int i=0;i<pointer_count;i++) { if(this->pointers[i]) { table->fd_flags[i] = this->fd_flags[i]; table->pointers[i] = this->pointers[i]; if (PARROT_POINTER(this->pointers[i])) { this->pointers[i]->addref(); this->pointers[i]->file->addref(); } } } strcpy(table->working_dir,this->working_dir); pfs_mmap *m; for(m=mmap_list;m;m=m->next) { pfs_mmap *n = new pfs_mmap(m); n->next = table->mmap_list; table->mmap_list = n; } return table; } void pfs_table::setparrot(int fd, int rfd, struct stat *buf) { if (!PARROT_FD(fd)) fatal("fd %d is not an open parrotfd", fd); if(fd == rfd || (VALID_FD(rfd) && pointers[rfd] == NULL)) { /* do nothing */ } else { fatal("setparrot: fd %d rfd %d valid %d ptr %p",fd,rfd,VALID_FD(rfd),pointers[rfd]); } assert(fd == rfd || 
(VALID_FD(rfd) && pointers[rfd] == NULL)); /* It's possible for another thread to create a native fd which is equal to * the parrot fd. If that happens we change the parrot fd to what the * kernel gave us. Keep in mind that we don't need to worry about another * racing thread which overwrites pointers[fd] with NATIVE because after * opening a parrot fd, we ignore other tracees and wait for openat to * return the actual parrot fd. */ if (rfd != fd) { debug(D_DEBUG, "parrotfd %d changed to real fd %d", fd, rfd); pointers[rfd] = pointers[fd]; fd_flags[rfd] = fd_flags[fd]; pointers[fd] = NULL; fd_flags[fd] = 0; fd = rfd; } debug(D_DEBUG, "setting parrotfd %d to %p (%d:%d)", fd, pointers[fd], (int)buf->st_dev, (int)buf->st_ino); assert(S_ISREG(buf->st_mode)); pointers[fd]->bind(buf->st_dev, buf->st_ino); } int pfs_table::bind( int fd, char *lpath, size_t len ) { if (!isnative(fd)) return (errno = EBADF, -1); assert(strlen(lpath) > 0); /* Resolve the path... */ struct pfs_name pname; if (!resolve_name(1, lpath, &pname, F_OK)) return -1; if (!pname.is_local) return (errno = EOPNOTSUPP, -1); if (strlen(pname.rest) >= len) return (errno = ENAMETOOLONG, -1); strcpy(lpath, pname.rest); return 0; } void pfs_table::close_on_exec() { for(int i=0;i<pointer_count;i++) { if(pointers[i] /* parrot, special, or native */ && fd_flags[i]&FD_CLOEXEC) { assert(pointers[i] != SPECIAL); debug(D_DEBUG, "closing on exec: %d", i); close(i); } } pfs_mmap *m; while(mmap_list) { m = mmap_list; mmap_list = m->next; delete m; } } /* Connect this logical file descriptor in the table to this physical file descriptor in the tracing process. */ void pfs_table::attach( int logical, int physical, int flags, mode_t mode, const char *name, struct stat *buf ) { char selfname[PATH_MAX] = ""; assert(VALID_FD(logical) && pointers[logical] == NULL); if (!name) { char path[PATH_MAX]; snprintf(path, PATH_MAX, "/proc/self/fd/%d", physical); if (::readlink(path, selfname, sizeof selfname - 1) == -1) { fatal("could not get name for fd %d: %s", physical, strerror(errno)); } name = selfname; } pointers[logical] = new pfs_pointer(pfs_file_bootstrap(physical,name),flags,mode); fd_flags[logical] = 0; setparrot(logical, logical, buf); } void pfs_table::setnative( int fd, int fdflags ) { debug(D_DEBUG, "setting fd %d as native%s", fd, fdflags & FD_CLOEXEC ? " (FD_CLOEXEC)" : ""); assert(VALID_FD(fd) && (pointers[fd] == NULL || pointers[fd] == NATIVE)); pointers[fd] = NATIVE; fd_flags[fd] = fdflags; } void pfs_table::setspecial( int fd ) { debug(D_DEBUG, "setting fd %d as special", fd); assert(VALID_FD(fd) && pointers[fd] == NULL); pointers[fd] = SPECIAL; fd_flags[fd] = 0; } int pfs_table::isvalid( int fd ) { return VALID_FD(fd); } int pfs_table::isnative( int fd ) { return VALID_FD(fd) && pointers[fd] == NATIVE; } int pfs_table::isparrot( int fd ) { return PARROT_FD(fd); } int pfs_table::isspecial( int fd ) { return VALID_FD(fd) && pointers[fd] == SPECIAL; } void pfs_table::recvfd( pid_t pid, int fd ) { struct stat buf; if (pfs_process_stat(pid, fd, &buf) == -1) fatal("could not stat %d: %s", fd, strerror(errno)); debug(D_DEBUG, "received fd %d", fd); pfs_pointer *pointer = pfs_pointer::lookup(buf.st_dev, buf.st_ino); if (pointer) { debug(D_DEBUG, "binding parrotfd %d to %p", fd, pointer); pointers[fd] = pointer; fd_flags[fd] = 0; /* No need to increment reference, sendfd (below) did so. 
*/ } else { setnative(fd, 0); } } void pfs_table::sendfd( int fd, int errored ) { if (PARROT_POINTER(pointers[fd])) { if (errored == 0) { char path[4096]; get_full_name(fd, path); debug(D_DEBUG, "sending parrot fd %d: `%s'", fd, path); pointers[fd]->addref(); pointers[fd]->file->addref(); } else { /* the kernel raised an error sending the fd, decrement the reference count */ pointers[fd]->delref(); pointers[fd]->file->delref(); } } else if (pointers[fd] == NATIVE && errored == 0) { debug(D_DEBUG, "sending native fd %d", fd); } /* else SPECIAL, we don't care */ } /* Chose the lowest numbered file descriptor that is available. */ int pfs_table::find_empty( int lowest ) { int fd; for( fd=lowest; fd<pointer_count; fd++ ) { if( !pointers[fd] ) { return fd; } } return -1; } /* If short_path is an absolute path, copy it to full path. Otherwise, tack the current or symlink directory on to the front of short_path, and copy it to full_path. */ void pfs_table::complete_path( const char *short_path, const char *parent_dir, char *full_path ) { if( short_path[0]=='/' ) { strcpy(full_path,short_path); } else { strcpy(full_path,parent_dir?parent_dir:working_dir); strcat(full_path,"/"); strcat(full_path,short_path); } assert(full_path[0] == '/'); } /* Complete a path, starting with this fd assumed to be a directory. */ #ifndef AT_FDCWD # define AT_FDCWD -100 #endif int pfs_table::complete_at_path( int dirfd, const char *path, char *full_path ) { if(path) { if(path[0]=='/') { strcpy(full_path,path); } else { if(dirfd==AT_FDCWD) { sprintf(full_path,"%s/%s",working_dir,path); } else { if (get_full_name(dirfd,full_path) == -1) return -1; strcat(full_path,"/"); strcat(full_path,path); } } } else { /* some *at syscalls (see utimensat) allow path to be NULL, fill full_path with path of dirfd */ if(dirfd==AT_FDCWD) { strcpy(full_path,working_dir); } else { if (get_full_name(dirfd,full_path) == -1) return -1; } } debug(D_DEBUG, "%s: `%s' -> `%s'", __func__, path, full_path); return 0; } void pfs_table::follow_symlink( struct pfs_name *pname, mode_t mode, int depth ) { char link_target[PFS_PATH_MAX]; char link_parent[PFS_PATH_MAX]; struct pfs_name new_pname = *pname; int in_proc = false; if (string_prefix_is(pname->path, "/proc/")) in_proc = true; int rlres = new_pname.service->readlink(pname,link_target,PFS_PATH_MAX-1); if (rlres > 0) { /* readlink does not NULL-terminate */ link_target[rlres] = '\000'; /* * Some locations in /proc (e.g. /proc/$PID/ns/, /proc/$PID/fd/ * with pipes) might contain magic dangling symlinks that can * nonetheless be opened as usual. If Parrot tries to follow them, * it will return erroneous ENOENT. While under /proc, don't try * to follow symlinks of this form. */ if (in_proc && string_match_regex(link_target, "^[a-z]+:\\[[0-9]+\\]$")) return; if (in_proc && string_match_regex(link_target, "^anon_inode:\\[?[a-zA-Z_0-9]+\\]?$")) return; const char *basename_start = path_basename(pname->logical_name); size_t dirname_len = basename_start - pname->logical_name; strncpy(link_parent, pname->logical_name, dirname_len); link_parent[dirname_len] = '\0'; if (resolve_name(0, link_target, &new_pname, mode, true, depth + 1, link_parent)) { *pname = new_pname; } } } /* Given a logical name from the application, expand it into a fully-qualified logical name, resolve it according to the mount list, split it into its components, and fill in the name structure. Return true on success, false otherwise. 
*/ extern int pfs_master_timeout; extern FILE *namelist_file; extern struct hash_table *namelist_table; /* All the syscalls calling "resolve_name" function can be divided into two categories: special_syscall & others. special_syscall: {"open_object", "bind32", "connect32", "bind64", "connect64", "truncate", "link1", "mkalloc", "lsalloc", "whoami", "md5", "copyfile1", "copyfile2"}; As for special_syscall, the copy degree of the involved file will be fullcopy; the copy degree of files involved in other syscalls will be metadatacopy. The following syscalls were fullcopy before, but now become metadatacopy. -- "lstat", "stat", "follow_symlink", "link2", "symlink2", "readlink", "unlink" */ void namelist_table_insert(const char *content, int is_special_syscall) { char *item_value; item_value = (char *)hash_table_lookup(namelist_table, content); const char *METADATA, *FULLCOPY; METADATA = "metadatacopy"; FULLCOPY = "fullcopy"; if(!item_value) { if(is_special_syscall) { hash_table_insert(namelist_table, content, FULLCOPY); } else { hash_table_insert(namelist_table, content, METADATA); } } else if(item_value == METADATA && is_special_syscall) { hash_table_remove(namelist_table, content); hash_table_insert(namelist_table, content, FULLCOPY); } } int pfs_table::resolve_name(int is_special_syscall, const char *cname, struct pfs_name *pname, mode_t mode, bool do_follow_symlink, int depth, const char *parent_dir ) { char full_logical_name[PFS_PATH_MAX]; pfs_resolve_t result; size_t n; if (depth > PFS_MAX_RESOLVE_DEPTH) return errno = ELOOP, 0; if(strlen(cname) == 0) return errno = ENOENT, 0; complete_path(cname,parent_dir,full_logical_name); path_collapse(full_logical_name,pname->logical_name,1); /* Check permissions to edit parent directory entry. */ if(mode & E_OK) { char dirname[PFS_PATH_MAX]; char tmp[PFS_PATH_MAX]; mode &= ~E_OK; path_dirname(pname->logical_name, dirname); result = pfs_resolve(dirname,tmp,W_OK,time(0)+pfs_master_timeout); switch(result) { case PFS_RESOLVE_DENIED: return errno = EACCES, 0; case PFS_RESOLVE_ENOENT: return errno = ENOENT, 0; case PFS_RESOLVE_FAILED: fatal("unable to resolve parent directory %s",dirname); return 0; default: break; } } result = pfs_resolve(pname->logical_name,pname->path,mode,time(0)+pfs_master_timeout); if(namelist_table) { namelist_table_insert(pname->path, is_special_syscall); } if(result==PFS_RESOLVE_DENIED) { return errno = EACCES, 0; } else if(result==PFS_RESOLVE_ENOENT) { return errno = ENOENT, 0; } else if(result==PFS_RESOLVE_FAILED) { fatal("unable to resolve file %s",pname->logical_name); return 0; } else { char tmp[PFS_PATH_MAX]; path_split(pname->path,pname->service_name,tmp); pname->service = pfs_service_lookup(pname->service_name); if(!pname->service) { pname->service = pfs_service_lookup_default(); strcpy(pname->service_name,"local"); strcpy(pname->host,"localhost"); strcpy(pname->hostport,"localhost"); strcpy(pname->rest,pname->path); pname->is_local = 1; } else if (!strncmp(pname->service_name, "ext_", 4)) { strcpy(pname->rest, tmp); strcpy(pname->host, "ext"); strcpy(pname->hostport, "ext"); pname->port = 0; } else { if(!strcmp(pname->service_name,"multi")) {// if we're dealing with a multivolume, split off at the @ path_split_multi(tmp,pname->host,pname->rest); } else { path_split(tmp,pname->host,pname->rest); } if(!pname->host[0]) { pname->hostport[0] = 0; pname->rest[0] = 0; return 1; } if (!strcmp(pname->service_name, "grow") && !strcmp(pname->host, "local")) { pname->host[0] = 0; pname->port = 0; strcpy(pname->hostport, "local"); 
} else { char *c = strrchr(pname->host, ':'); if(c) { *c = 0; pname->port = atoi(c+1); } else { pname->port = pname->service->get_default_port(); } sprintf(pname->hostport,"%s:%d",pname->host,pname->port); } if(!strcmp(pname->service_name,"multi")) { strcpy(tmp,pname->rest); path_split(tmp,&pname->hostport[strlen(pname->hostport)],pname->rest); // reconstruct hostport as host:port@volume; path goes in rest. } if(pname->service->tilde_is_special() && !strncmp(pname->rest,"/~",2)) { memmove(pname->rest,&pname->rest[1],strlen(pname->rest)); } pname->is_local = 0; } if(pattern_match(pname->path, "^/proc/self/?()", &n) >= 0) { strncpy(full_logical_name, pname->path, sizeof(full_logical_name)); string_nformat(pname->path, sizeof(pname->path), "/proc/%d/%s", pfs_process_getpid(), &full_logical_name[n]); strcpy(pname->logical_name, pname->path); strcpy(pname->rest, pname->path); pname->service = pfs_service_lookup_default(); strcpy(pname->service_name,"local"); strcpy(pname->host,"localhost"); strcpy(pname->hostport,"localhost"); pname->is_local = 1; } else if (pattern_match(pname->path, "^/dev/fd/?()", &n) >= 0) { strncpy(full_logical_name, pname->path, sizeof(full_logical_name)); string_nformat(pname->path, sizeof(pname->path), "/proc/%d/fd/%s", pfs_process_getpid(), &full_logical_name[n]); strcpy(pname->logical_name, pname->path); strcpy(pname->rest, pname->path); pname->service = pfs_service_lookup_default(); strcpy(pname->service_name,"local"); strcpy(pname->host,"localhost"); strcpy(pname->hostport,"localhost"); pname->is_local = 1; } /* Enable cross service symlink resolution */ if (do_follow_symlink && pfs_follow_symlinks) { follow_symlink(pname, mode, depth + 1); } return 1; } } pfs_dir * pfs_table::open_directory(pfs_name *pname, int flags) { pfs_dir *file; if((flags&O_RDWR)||(flags&O_WRONLY)) { errno = EISDIR; file = 0; } else { file = pname->service->getdir(pname); } return file; } pfs_pointer *pfs_table::getopenfile( pid_t pid, int fd ) { struct pfs_process *target = pfs_process_lookup(pid); if(target && target->table) { if (!target->table->isvalid(fd)) { return (errno = ENOENT, (pfs_pointer *)NULL); } pfs_pointer *desc = target->table->pointers[fd]; if (PARROT_POINTER(desc)) { return desc; } else if (NATIVE_POINTER(desc)) { return (errno = ECHILD, (pfs_pointer *)NULL); /* hack, allow open to proceed natively */ } else { assert(desc == SPECIAL || desc == NULL); return (errno = ENOENT, (pfs_pointer *)NULL); } } else { return (errno = ESRCH, (pfs_pointer *)NULL); } } pfs_file * pfs_table::open_object( const char *lname, int flags, mode_t mode, int force_cache ) { pfs_name pname; pfs_file *file=0; mode_t open_mode = X_OK; int force_stream = pfs_force_stream; if(flags & O_RDWR) { open_mode |= R_OK|W_OK; } else if (flags & O_WRONLY) { open_mode |= W_OK; } else { open_mode |= R_OK; } // Hack: Disable caching when doing plain old file copies. if( !strcmp(pfs_current->name,"cp") || !strcmp(string_back(pfs_current->name,3),"/cp") ) { force_stream = 1; } // Hack: Almost all calls to open a directory are routed // through opendir(), which sets O_DIRECTORY. In a few // cases, such as the use of openat in pwd, the flag // is not set, set we detect it here. const char *basename = path_basename(lname); if(!strcmp(basename,".") || !strcmp(basename,"..")) { flags |= O_DIRECTORY; } // If a file is opened with O_CREAT, we should check for write permissions // on the parent directory. However, this seems to cause problems if // system directories (or the filesystem root) are marked RO. 
if(resolve_name(1,lname,&pname,open_mode)) { if((flags&O_CREAT) && (flags&O_DIRECTORY)) { // Linux ignores O_DIRECTORY in this combination flags &= ~O_DIRECTORY; } char *pid = NULL; if(flags&O_DIRECTORY) { if (pattern_match(pname.rest, "^/proc/(%d+)/fd/?$", &pid) >= 0) { int i; pfs_dir *dir = new pfs_dir(&pname); pid_t ipid = atoi(pid); /* idea here is to not include a SPECIAL fd in this directory */ for (i = 0; i < pointer_count; i++) { pfs_pointer *desc = getopenfile(ipid, i); if (desc || errno == ECHILD) { struct dirent dirent; dirent.d_ino = random_uint(); dirent.d_off = 0; dirent.d_reclen = sizeof(dirent); snprintf(dirent.d_name, sizeof(dirent.d_name), "%d", i); dirent.d_type = DT_LNK; dir->append(&dirent); } } file = dir; } else { file = open_directory(&pname, flags); } } else if(pname.service->is_local()) { char *fd = NULL; if (pattern_match(pname.rest, "^/proc/(%d+)/fd/(%d+)$", &pid, &fd) >= 0) { pfs_pointer *desc = getopenfile(atoi(pid), atoi(fd)); if (desc) { desc->file->addref(); return desc->file; } else if (errno == ESRCH || errno == ECHILD) { /* outside of Parrot or native, let kernel deal with it... */ file = pname.service->open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } } else if (pattern_match(pname.rest, "^/proc/(%d+)/maps$", &pid) >= 0) { extern char pfs_temp_per_instance_dir[PATH_MAX]; static const char name[] = "parrot-maps"; int fd = memfdexe(name, pfs_temp_per_instance_dir); if (fd >= 0) { buffer_t B[1]; buffer_init(B); mmap_proc(atoi(pid), B); full_write(fd, buffer_tostring(B), buffer_pos(B)); ::lseek(fd, 0, SEEK_SET); buffer_free(B); file = pfs_file_bootstrap(fd, name); } else { errno = ENOENT; file = 0; } } else { file = pname.service->open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } free(fd); } else if(pname.service->is_seekable()) { if(force_cache) { file = pfs_cache_open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } else { file = pname.service->open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } } else { if(force_stream) { file = pname.service->open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } else { file = pfs_cache_open(&pname,flags,mode); if(!file && (errno == EISDIR)) { file = open_directory(&pname, flags); } } } free(pid); } else { file = 0; } return file; } int pfs_table::open( const char *lname, int flags, mode_t mode, int force_cache, char *path, size_t len ) { int result = -1; pfs_file *file=0; /* Apply the umask to our mode */ mode = mode &~(pfs_current->umask); #if defined(linux) & !defined(O_BINARY) #define O_BINARY 0x8000 #endif /* Get rid of meaningless undocumented flags */ flags = flags & ~O_BINARY; #ifdef O_SYNC if(pfs_force_sync) flags |= O_SYNC; #endif result = find_empty(0); if(result>=0) { file = open_object(lname,flags,mode,force_cache); if(file) { if(path && file->canbenative(path, len)) { file->close(); result = -2; } else { pointers[result] = new pfs_pointer(file,flags,mode); fd_flags[result] = 0; if (flags&O_CLOEXEC) fd_flags[result] |= FD_CLOEXEC; if(flags&O_APPEND) this->lseek(result,0,SEEK_END); } } else if (errno == ECHILD /* hack: indicates to open natively */) { snprintf(path, len, "%s", lname); result = -2; } else { result = -1; } } else { errno = EMFILE; result = -1; } return result; } int pfs_table::get_real_fd( int fd ) { CHECK_FD(fd); return pointers[fd]->file->get_real_fd(); } 
int pfs_table::get_full_name( int fd, char *name ) { CHECK_FD(fd); strcpy(name,pointers[fd]->file->get_name()->path); return 0; } int pfs_table::get_local_name( int fd, char *name ) { CHECK_FD(fd); return pointers[fd]->file->get_local_name(name); } /* Close is a little tricky. The file pointer might be in use by several dups, or the file itself might be in use by several opens. */ int pfs_table::close( int fd ) { /* FIXME: if a previously mmaped file is written to, we ought to clean up * the channel cache on close. Otherwise, subsequent mmaps might return * stale data. Related: * https://github.com/cooperative-computing-lab/cctools/issues/1584 */ if (isnative(fd)) { debug(D_DEBUG, "marking closed native fd %d", fd); pointers[fd] = NULL; fd_flags[fd] = 0; return 0; } else { CHECK_FD(fd); debug(D_DEBUG, "closing parrot fd %d", fd); pfs_pointer *p = pointers[fd]; pfs_file *f = p->file; int result = 0; if(f->refs()==1) { result = f->close(); delete f; } else { f->delref(); } if(p->refs()==1) { delete p; } else { p->delref(); } pointers[fd]=0; fd_flags[fd]=0; return result; } } pfs_ssize_t pfs_table::read( int fd, void *data, pfs_size_t nbyte ) { pfs_ssize_t result = -1; CHECK_FD(fd); result = this->pread(fd,data,nbyte,pointers[fd]->tell()); if(result>0) pointers[fd]->bump(result); return result; } pfs_ssize_t pfs_table::write( int fd, const void *data, pfs_size_t nbyte ) { pfs_ssize_t result = -1; CHECK_FD(fd); result = this->pwrite(fd,data,nbyte,pointers[fd]->tell()); if(result>0) pointers[fd]->bump(result); return result; } static void stream_warning( pfs_file *f ) { if(!f->get_name()->is_local && !pfs_current->did_stream_warning) { debug(D_NOTICE,"Program: %s",pfs_current->name); debug(D_NOTICE,"Is using file: %s",f->get_name()->path); debug(D_NOTICE,"For non-sequential access."); debug(D_NOTICE,"This won't work with streaming (-s) turned on."); pfs_current->did_stream_warning = 1; } } pfs_ssize_t pfs_table::pread( int fd, void *data, pfs_size_t nbyte, pfs_off_t offset ) { pfs_ssize_t result = -1; CHECK_FD(fd); if( (!data) || (nbyte<0) ) { errno = EINVAL; result = -1; } else if( nbyte==0 ) { result = 0; } else { pfs_file *f = pointers[fd]->file; if(!f->is_seekable() && f->get_last_offset()!=offset) { stream_warning(f); errno = ESPIPE; result = -1; } else { result = f->read( data, nbyte, offset ); if(result>0) f->set_last_offset(offset+result); } } return result; } pfs_ssize_t pfs_table::pwrite( int fd, const void *data, pfs_size_t nbyte, pfs_off_t offset ) { pfs_ssize_t result = -1; CHECK_FD(fd); if( (!data) || (nbyte<0) ) { errno = EINVAL; result = -1; } else if( nbyte==0 ) { result = 0; } else { pfs_file *f = pointers[fd]->file; if(!f->is_seekable() && f->get_last_offset()!=offset) { stream_warning(f); errno = ESPIPE; result = -1; } else { result = f->write( data, nbyte, offset ); if(result>0) f->set_last_offset(offset+result); } } return result; } pfs_ssize_t pfs_table::readv( int fd, const struct iovec *vector, int count ) { int i; pfs_ssize_t result = 0; pfs_ssize_t chunk; CHECK_FD(fd); for( i = 0; i < count; i++ ) { chunk = this->read( fd, vector->iov_base, vector->iov_len ); if( chunk < 0 ) return chunk; result += chunk; if( chunk != (pfs_ssize_t) vector->iov_len ) return result; vector++; } return result; } pfs_ssize_t pfs_table::writev( int fd, const struct iovec *vector, int count ) { int i; pfs_ssize_t result = 0; pfs_ssize_t chunk; CHECK_FD(fd); for( i = 0; i < count; i++ ) { chunk = this->write( fd, vector->iov_base, vector->iov_len ); if( chunk < 0 ) return chunk; result += 
chunk; if( chunk != (pfs_ssize_t) vector->iov_len ) return result; vector++; } return result; } pfs_off_t pfs_table::lseek( int fd, pfs_off_t offset, int whence ) { pfs_file *f; pfs_pointer *p; pfs_off_t result = -1; CHECK_FD(fd); p = pointers[fd]; f = p->file; if(!f->is_seekable()) { errno = ESPIPE; result = -1; } else { result = p->seek(offset,whence); } return result; } int pfs_table::dup2( int ofd, int nfd, int flags ) { if (!VALID_FD(ofd) || !VALID_FD(nfd)) return (errno = EBADF, -1); if (ofd == nfd) return nfd; debug(D_DEBUG, "dup2(%d, %d, %x)", ofd, nfd, flags); close(nfd); pointers[nfd] = pointers[ofd]; if (PARROT_POINTER(pointers[nfd])) { pointers[nfd]->addref(); pointers[nfd]->file->addref(); } fd_flags[nfd] = flags; return nfd; } int pfs_table::fchdir( int fd ) { int result = -1; CHECK_FD(fd); pfs_name *pname = pointers[fd]->file->get_name(); result = this->chdir(pname->path); return result; } int pfs_table::ftruncate( int fd, pfs_off_t size ) { int result = -1; CHECK_FD(fd); if( size<0 ) { result = 0; } else { result = pointers[fd]->file->ftruncate(size); } return result; } int pfs_table::fstat( int fd, struct pfs_stat *b ) { int result; CHECK_FD(fd); pfs_file *file = pointers[fd]->file; result = file->fstat(b); if(result>=0) { b->st_blksize = file->get_block_size(); } return result; } int pfs_table::fstatfs( int fd, struct pfs_statfs *buf ) { CHECK_FD(fd); return pointers[fd]->file->fstatfs(buf); } int pfs_table::fsync( int fd ) { CHECK_FD(fd); return pointers[fd]->file->fsync(); } int pfs_table::flock( int fd, int op ) { CHECK_FD(fd); if (pfs_no_flock) return 0; return pointers[fd]->file->flock(op); } int pfs_table::fcntl( int fd, int cmd, void *arg ) { int result; int flags; if (!VALID_FD(fd)) return (errno = EBADF, -1); /* fcntl may operate on the *file descriptor* table or the *open file description* table */ if (cmd == F_GETFD || cmd == F_SETFD) { if (!(PARROT_POINTER(pointers[fd]) || NATIVE_POINTER(pointers[fd]))) return (errno = EBADF, -2); if (cmd == F_GETFD) { result = fd_flags[fd]; } else if (cmd == F_SETFD) { fd_flags[fd] = (intptr_t)arg; result = 0; } else assert(0); return result; } /* now open file description table: */ if (!PARROT_POINTER(pointers[fd])) return (errno = EBADF, -1); switch (cmd) { case F_GETFL: result = pointers[fd]->flags; break; case F_SETFL: flags = (PTRINT_T)arg; pointers[fd]->flags = flags; flags |= O_NONBLOCK; pointers[fd]->file->fcntl(cmd,(void*)(PTRINT_T)flags); result = 0; break; /* A length of zero to FREESP indicates the file should be truncated at the start value. Otherwise, we don't support it. */ #ifdef F_FREESP case F_FREESP: { struct flock *f = (struct flock *)arg; if( (f->l_whence==0) && (f->l_len==0) ) { result = this->ftruncate(fd,f->l_start); } else { errno = ENOSYS; result = -1; } } break; #endif #ifdef F_FREESP64 case F_FREESP64: { struct flock64 *f64 = (struct flock64 *)arg; if( (f64->l_whence==0) && (f64->l_len==0) ) { result = this->ftruncate(fd,f64->l_start); } else { errno = ENOSYS; result = -1; } } break; #endif default: result = pointers[fd]->file->fcntl(cmd,arg); break; } return result; } int pfs_table::fchmod( int fd, mode_t mode ) { CHECK_FD(fd); return pointers[fd]->file->fchmod(mode); } int pfs_table::fchown( int fd, struct pfs_process *p, uid_t uid, gid_t gid ) { CHECK_FD(fd); int result = pointers[fd]->file->fchown(uid,gid); /* If the service doesn't implement it, but its our own uid, then fake success, as tools like cp do this very often. 
*/ if(result<0 && errno==ENOSYS && uid==p->euid && gid==p->egid) { result = 0; } return result; } /* Some things to note about chdir. We rely on the underlying service to resolve complex paths containing symbolic links, parents (..), and so forth, by performing the chdir and then returning the new canonical name for the path. It is not correct for us to simply unwind such paths ourselves, because by following those elements, we may end up somewhere completely new. However, not all services have this capability. (For example, rfio.) So, if the returned canonical name has unusual elements, they must be cleaned up before they are recorded in the working directory. */ int pfs_table::chdir( const char *path ) { int result = -1; char newpath[PFS_PATH_MAX]; pfs_name pname; /* This is a special case in Unix, do not attempt to complete the path and then change directory. */ if(path[0]==0) { errno = ENOENT; return -1; } if(resolve_name(0,path,&pname,X_OK)) { result = pname.service->chdir(&pname,newpath); if(result>=0) { path_collapse(pname.logical_name,working_dir,1); result = 0; } } return result; } char *pfs_table::getcwd( char *path, pfs_size_t size ) { char cwd[PFS_PATH_MAX]; strcpy(cwd, working_dir); path_remove_trailing_slashes(cwd); if (strlen(cwd)+1 > (size_t)size) { errno = ERANGE; return NULL; } strcpy(path, cwd); return path; } int pfs_table::access( const char *n, mode_t mode ) { pfs_name pname; int result = -1; if(resolve_name(0,n,&pname,X_OK | mode)) { result = pname.service->access(&pname,mode); } return result; } int pfs_table::chmod( const char *n, mode_t mode ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK)) { result = pname.service->chmod(&pname,mode); } return result; } int pfs_table::chown( const char *n, struct pfs_process *p, uid_t uid, gid_t gid ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK)) { result = pname.service->chown(&pname,uid,gid); } /* If the service doesn't implement it, but its our own uid, then fake success, as tools like cp do this very often. 
*/ if(result<0 && errno==ENOSYS && uid==p->euid && gid==p->egid) { result = 0; } return result; } int pfs_table::lchown( const char *n, uid_t uid, gid_t gid ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK,false)) { result = pname.service->lchown(&pname,uid,gid); } return result; } int pfs_table::truncate( const char *n, pfs_off_t offset ) { pfs_name pname; int result=-1; if(resolve_name(1,n,&pname,W_OK)) { result = pname.service->truncate(&pname,offset); } return result; } ssize_t pfs_table::getxattr (const char *path, const char *name, void *value, size_t size) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,R_OK)) { result = pname.service->getxattr(&pname,name,value,size); } return result; } ssize_t pfs_table::lgetxattr (const char *path, const char *name, void *value, size_t size) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,R_OK,false)) { result = pname.service->lgetxattr(&pname,name,value,size); } return result; } ssize_t pfs_table::fgetxattr (int fd, const char *name, void *value, size_t size) { CHECK_FD(fd); return pointers[fd]->file->fgetxattr(name,value,size); } ssize_t pfs_table::listxattr (const char *path, char *list, size_t size) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,R_OK)) { result = pname.service->listxattr(&pname,list,size); } return result; } ssize_t pfs_table::llistxattr (const char *path, char *list, size_t size) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,R_OK,false)) { result = pname.service->llistxattr(&pname,list,size); } return result; } ssize_t pfs_table::flistxattr (int fd, char *list, size_t size) { CHECK_FD(fd); return pointers[fd]->file->flistxattr(list,size); } int pfs_table::setxattr (const char *path, const char *name, const void *value, size_t size, int flags) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,W_OK)) { result = pname.service->setxattr(&pname,name,value,size,flags); } return result; } int pfs_table::lsetxattr (const char *path, const char *name, const void *value, size_t size, int flags) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,W_OK,false)) { result = pname.service->lsetxattr(&pname,name,value,size,flags); } return result; } int pfs_table::fsetxattr (int fd, const char *name, const void *value, size_t size, int flags) { CHECK_FD(fd); return pointers[fd]->file->fsetxattr(name,value,size,flags); } int pfs_table::removexattr (const char *path, const char *name) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,W_OK)) { result = pname.service->removexattr(&pname,name); } return result; } int pfs_table::lremovexattr (const char *path, const char *name) { pfs_name pname; int result=-1; if(resolve_name(0,path,&pname,W_OK,false)) { result = pname.service->lremovexattr(&pname,name); } return result; } int pfs_table::fremovexattr (int fd, const char *name) { CHECK_FD(fd); return pointers[fd]->file->fremovexattr(name); } int pfs_table::utime( const char *n, struct utimbuf *buf ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK)) { result = pname.service->utime(&pname,buf); } return result; } int pfs_table::utimens( const char *n, const struct timespec times[2] ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK)) { result = pname.service->utimens(&pname,times); } return result; } int pfs_table::lutimens( const char *n, const struct timespec times[2] ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,W_OK,false)) { result = pname.service->lutimens(&pname,times); } return 
result; } int pfs_table::unlink( const char *n ) { pfs_name pname; int result = -1; if(resolve_name(0,n,&pname,E_OK,false)) { result = pname.service->unlink(&pname); if(result==0) { pfs_cache_invalidate(&pname); pfs_channel_update_name(pname.path,0); } } return result; } int pfs_table::stat( const char *n, struct pfs_stat *b ) { pfs_name pname; int result = -1; /* You don't need to have read permission on a file to stat it. */ if(resolve_name(0,n,&pname,F_OK)) { result = pname.service->stat(&pname,b); if(result>=0) { b->st_blksize = pname.service->get_block_size(); } else if(errno==ENOENT && !pname.hostport[0]) { pfs_service_emulate_stat(&pname,b); b->st_mode = S_IFDIR | 0555; result = 0; } } return result; } int pfs_table::statfs( const char *n, struct pfs_statfs *b ) { pfs_name pname; int result = -1; /* You don't need to have read permission on a file to stat it. */ if(resolve_name(0,n,&pname,F_OK)) { result = pname.service->statfs(&pname,b); } return result; } int pfs_table::lstat( const char *n, struct pfs_stat *b ) { pfs_name pname; int result=-1; /* You don't need to have read permission on a file to stat it. */ if(resolve_name(0,n,&pname,F_OK,false)) { result = pname.service->lstat(&pname,b); if(result>=0) { b->st_blksize = pname.service->get_block_size(); } else if(errno==ENOENT && !pname.hostport[0]) { pfs_service_emulate_stat(&pname,b); b->st_mode = S_IFDIR | 0555; result = 0; } } return result; } int pfs_table::rename( const char *n1, const char *n2 ) { pfs_name p1, p2; int result = -1; if(resolve_name(0,n1,&p1,E_OK,false) && resolve_name(0,n2,&p2,E_OK,false)) { if(p1.service==p2.service) { result = p1.service->rename(&p1,&p2); if(result==0) { pfs_cache_invalidate(&p1); pfs_cache_invalidate(&p2); pfs_channel_update_name(p1.path, p2.path); } } else { errno = EXDEV; } } return result; } int pfs_table::link( const char *n1, const char *n2 ) { pfs_name p1, p2; int result = -1; // Require write on the target to prevent linking into a RW directory // and bypassing restrictions if(resolve_name(0,n1,&p1,W_OK,false) && resolve_name(0,n2,&p2,E_OK,false)) { if(p1.service==p2.service) { result = p1.service->link(&p1,&p2); } else { errno = EXDEV; } } return result; } int pfs_table::symlink( const char *target, const char *path ) { pfs_name pname; int result = -1; /* Note carefully: Symlinks are used to store all sorts of information by applications. They need not be valid, and we often cannot interpret them at runtime. Thus, we only call resolve_name on the link name, not on the contents. The link contents are passed verbatim down to the needed driver. */ if(resolve_name(0,path,&pname,E_OK,false)) { result = pname.service->symlink(target,&pname); } return result; } /* Readlink is ordinarily passed down to each driver. However, when we are examining the /proc filesystem, there are a few elements that must be manually interpreted so that the caller gets the logical name rather than the physical name, which may have been redirected to the cache directory. Note that /proc/self is handled in resolve_name, where it is manually mapped to /proc/(pid), otherwise the path would refer to parrot itself. 
*/ int pfs_table::readlink( const char *n, char *buf, pfs_size_t size ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,R_OK,false)) { char *pid = NULL, *fd = NULL; if(pattern_match(pname.path, "^/proc/(%d+)/fd/(%d+)$",&pid,&fd) >= 0) { pfs_pointer *desc = getopenfile(atoi(pid), atoi(fd)); if (desc) { const char *path = desc->file->get_name()->path; strncpy(buf,path,size); result = MIN(strlen(path),(size_t)size); } else if (errno == ECHILD) { /* native... */ result = ::readlink(pname.path,buf,size); } else { result = -1; } } else if(pattern_match(pname.path, "^/proc/(%d+)/exe", &pid) >= 0) { struct pfs_process *target = pfs_process_lookup(atoi(pid)); if(target) { const char *path = target->name; size_t count = MIN(strlen(path), (size_t)size); memcpy(buf,path,count); result = (int)count; } else { result = pname.service->readlink(&pname,buf,size); } } else { result = pname.service->readlink(&pname,buf,size); } free(pid); free(fd); } else { result = -1; errno = ENOENT; } return result; } int pfs_table::mknod( const char *n, mode_t mode, dev_t dev ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,E_OK)) { result = pname.service->mknod(&pname,mode,dev); } return result; } int pfs_table::mkdir( const char *n, mode_t mode ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,E_OK)) { result = pname.service->mkdir(&pname,mode); } return result; } int pfs_table::rmdir( const char *n ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,E_OK,false)) { result = pname.service->rmdir(&pname); } return result; } struct dirent * pfs_table::fdreaddir( int fd ) { if (!PARROT_FD(fd)) return (errno = EBADF, (struct dirent *)NULL); pfs_off_t next_offset; pfs_pointer *fp = pointers[fd]; struct dirent *result = fp->file->fdreaddir(fp->tell(),&next_offset); if(result) fp->seek(next_offset,SEEK_SET); return result; } int pfs_table::mkalloc( const char *n, pfs_ssize_t size, mode_t mode ) { pfs_name pname; int result=-1; if(resolve_name(0,n,&pname,E_OK)) { result = pname.service->mkalloc(&pname,size,mode); } return result; } int pfs_table::lsalloc( const char *n, char *a, pfs_ssize_t *total, pfs_ssize_t *avail ) { pfs_name pname; int result=-1; if(resolve_name(1,n,&pname,R_OK)) { result = pname.service->lsalloc(&pname,a,total,avail); if(result==0) { strcpy(a,pname.path); } } return result; } int pfs_table::whoami( const char *n, char *buf, int length ) { pfs_name pname; int result = -1; if(resolve_name(1,n,&pname,F_OK)) { result = pname.service->whoami(&pname,buf,length); } return result; } static int search_to_access (int flags) { int access_flags = F_OK; if (flags & PFS_SEARCH_R_OK) access_flags |= R_OK; if (flags & PFS_SEARCH_W_OK) access_flags |= W_OK; if (flags & PFS_SEARCH_X_OK) access_flags |= X_OK; return access_flags; } static int search_error (int err, int errsource, char *path, char *buffer, size_t *i, size_t len) { size_t n = snprintf(buffer+*i, len-*i, "%s%d|%d|%s", *i==0 ? 
"" : "|", err, errsource, path); if (n>=len-*i) { errno = ERANGE; return -1; } else { *i += n; return 0; } } static int search_stat_pack(const struct pfs_stat *p_info, char *buffer, size_t *i, size_t len) { struct stat info; COPY_STAT(*p_info, info); size_t n = snprintf(buffer + *i, len - *i, "|%ld,%ld,%ld,%ld," "%ld,%ld,%ld,%ld," "%ld,%ld,%ld,%ld," "%ld", (long)info.st_dev, (long)info.st_ino, (long)info.st_mode, (long)info.st_nlink, (long)info.st_uid, (long)info.st_gid, (long)info.st_rdev, (long)info.st_size, (long)info.st_atime, (long)info.st_mtime, (long)info.st_ctime, (long)info.st_blksize, (long)info.st_blocks ); if (n>=len-*i) { return -1; } else { *i += n; return 0; } } /* NOTICE: this function's logic should be kept in sync with function of same * name in chirp_fs_local.c. */ static int search_match_file(const char *pattern, const char *name) { debug(D_DEBUG, "search_match_file(`%s', `%s')", pattern, name); /* Decompose the pattern in atoms which are each matched against. */ while (1) { char atom[PFS_PATH_MAX]; const char *end = strchr(pattern, '|'); /* disjunction operator */ memset(atom, 0, sizeof(atom)); if (end) strncpy(atom, pattern, (size_t)(end-pattern)); else strcpy(atom, pattern); /* Here we might have a pattern like '*' which matches any file so we * iteratively pull leading components off of `name' until we get a * match. In the case of '*', we would pull off all leading components * until we reach the file name, which would always match '*'. */ const char *test = name; do { int result = fnmatch(atom, test, FNM_PATHNAME); debug(D_DEBUG, "fnmatch(`%s', `%s', FNM_PATHNAME) = %d", atom, test, result); if(result == 0) { return 1; } test = strchr(test, '/'); if (test) test += 1; } while (test); if (end) pattern = end+1; else break; } return 0; } /* NOTICE: this function's logic should be kept in sync with function of same * name in chirp_fs_local.c. */ static int search_should_recurse(const char *base, const char *pattern) { debug(D_DEBUG, "search_should_recurse(base = `%s', pattern = `%s')", base, pattern); /* Decompose the pattern in atoms which are each matched against. */ while (1) { char atom[PFS_PATH_MAX]; if (*pattern != '/') return 1; /* unanchored pattern is always recursive */ const char *end = strchr(pattern, '|'); /* disjunction operator */ memset(atom, 0, sizeof(atom)); if (end) strncpy(atom, pattern, (size_t)(end-pattern)); else strcpy(atom, pattern); /* Here we want to determine if `base' matches earlier parts of * `pattern' to see if we should recurse in the directory `base'. To do * this, we strip off final parts of `pattern' until we get a match. */ while (*atom) { int result = fnmatch(atom, base, FNM_PATHNAME); debug(D_DEBUG, "fnmatch(`%s', `%s', FNM_PATHNAME) = %d", atom, base, result); if(result == 0) { return 1; } char *last = strrchr(atom, '/'); if (last) { *last = '\0'; } else { break; } } if (end) pattern = end+1; else break; } return 0; } /* NOTICE: this function's logic should be kept in sync with function of same * name in chirp_fs_local.c. 
*/ static int search_directory(pfs_table *t, const char * const base, char *fullpath, const char *pattern, int flags, char *buffer, size_t len, size_t *i) { if(strlen(pattern) == 0) return 0; debug(D_DEBUG, "search_directory(base = `%s', fullpath = `%s', pattern = `%s', flags = %d, ...)", base, fullpath, pattern, flags); int metadata = flags & PFS_SEARCH_METADATA; int stopatfirst = flags & PFS_SEARCH_STOPATFIRST; int includeroot = flags & PFS_SEARCH_INCLUDEROOT; int result = 0; int fd = t->open(fullpath, O_DIRECTORY|O_RDONLY, 0, 0, NULL, 0); char *current = fullpath + strlen(fullpath); /* point to end to current directory */ if(fd >= 0) { errno = 0; struct dirent *entry; while((entry = t->fdreaddir(fd))) { struct pfs_stat buf; int access_flags = search_to_access(flags); char *name = entry->d_name; if(strcmp(name, ".") == 0 || strcmp(name, "..") == 0) continue; sprintf(current, "/%s", name); int stat_result = t->stat(fullpath, &buf); if(search_match_file(pattern, base)) { const char *matched = includeroot ? fullpath+1 : base; /* fullpath+1 because chirp_root_path is always "./" !! */ result += 1; if(access_flags == F_OK || t->access(fullpath, access_flags) == 0) { if(metadata) { if(stat_result) { if (search_error(errno, PFS_SEARCH_ERR_STAT, fullpath, buffer, i, len) == -1) return -1; } else { size_t l = snprintf(buffer+*i, len-*i, "%s0|%s", *i==0 ? "" : "|", matched); if (l >= len-*i) { errno = ERANGE; return -1; } *i += l; if (search_stat_pack(&buf, buffer, i, len) == -1) { errno = ERANGE; return -1; } if(stopatfirst) return 1; } } else { size_t l = snprintf(buffer+*i, len-*i, "%s0|%s|", *i == 0 ? "" : "|", matched); if (l >= len-*i) { errno = ERANGE; return -1; } *i += l; if(stopatfirst) return 1; } } /* FIXME access failure */ } if(stat_result == 0 && S_ISDIR(buf.st_mode) && search_should_recurse(base, pattern)) { int n = search_directory(t, base, fullpath, pattern, flags, buffer, len, i); if(n > 0) { result += n; if(stopatfirst) return result; } } *current = '\0'; /* clear current entry */ errno = 0; } if (errno) { if (search_error(errno, PFS_SEARCH_ERR_READ, fullpath, buffer, i, len) == -1) { t->close(fd); /* can't report error anyway at this point */ errno = ERANGE; return -1; } } if (t->close(fd) == -1) { if (search_error(errno, PFS_SEARCH_ERR_CLOSE, fullpath, buffer, i, len) == -1) { errno = ERANGE; return -1; } } } else { if (search_error(errno, PFS_SEARCH_ERR_OPEN, fullpath, buffer, i, len) == -1) { errno = ERANGE; return -1; } } return result; } static int is_pattern (const char *pattern) { if (*pattern != '/') return 1; /* unrooted expressions are patterns */ for (; *pattern; pattern += 1) { switch (*pattern) { case '\\': #if 0 /* we need to change the pattern to remove the backslashes * so we can do exact matches, future work. */ pattern += 1; if (*pattern == '\0') { return 0; } break; #endif case '*': case '?': case '[': case '|': return 1; case '"': case '\'': { /* const char quote = *pattern; quote = quote; */ /* quoting behavior isn't very clear... 
*/ } default: break; } } return 0; } int pfs_table::search( const char *paths, const char *patt, int flags, char *buffer, size_t buffer_length, size_t *i ) { pfs_name pname; const char *start = paths; const char *end; const char *pattern = patt; int found = 0; int result; debug(D_DEBUG, "%s(%s, %s, %d, %p, %zu, %p)", __FUNCTION__, paths, patt, flags, buffer, buffer_length, i); int done = 0; do { if (strlen(start)==0) break; char path[PFS_PATH_MAX+1]; char directory[PFS_PATH_MAX+1]; end = strchr(start, PFS_SEARCH_DELIMITER); if (end) { if (start == end) { /* "::" ? */ strcpy(path, "."); } else { ptrdiff_t length = end-start; /* C++ doesn't let us properly cast these to void pointers for proper byte length */ memset(path, 0, sizeof(path)); strncpy(path, start, length); } start = end+1; } else { strcpy(path, start); done = 1; } path_collapse(path, directory, 0); debug(D_DEBUG, "searching directory `%s'", directory); if (!is_pattern(pattern)) { struct pfs_stat statbuf; int access_flags = search_to_access(flags); const char *base = directory + strlen(directory); debug(D_DEBUG, "pattern `%s' will be exactly matched", pattern); strcat(directory, pattern); result = this->stat(directory, &statbuf); if (result == 0) { const char *matched; if (flags & PFS_SEARCH_INCLUDEROOT) matched = directory; else matched = base; if (access_flags == F_OK || this->access(directory, access_flags) == 0) { size_t l = snprintf(buffer+*i, buffer_length-*i, "%s0|%s", *i==0 ? "" : "|", matched); if (l >= buffer_length-*i) { errno = ERANGE; return -1; } *i += l; if (flags & PFS_SEARCH_METADATA) { if (search_stat_pack(&statbuf, buffer, i, buffer_length) == -1) { errno = ERANGE; return -1; } } else { if ((size_t)snprintf(buffer+*i, buffer_length-*i, "|") >= buffer_length-*i) { errno = ERANGE; return -1; } (*i)++; } result = 1; } } else { result = 0; } } else { /* Check to see if search is implemented in the service */ if(resolve_name(0,path, &pname, X_OK)) { debug(D_DEBUG, "attempting service `%s' search routine for path `%s'", pname.service_name, pname.path); if ((result = pname.service->search(&pname, pattern, flags, buffer, buffer_length, i))==-1 && errno == ENOSYS) { debug(D_DEBUG, "no service to search found: falling back to manual search `%s'", directory); result = search_directory(this, directory+strlen(directory), directory, pattern, flags, buffer, buffer_length, i); } debug(D_DEBUG, "= %d (`%s' search)", result, pname.service_name); } else result = -1; } if (result == -1) return -errno; else if (flags & PFS_SEARCH_STOPATFIRST && result == 1) { return result; } else found += result; } while (!done); return found; } int pfs_table::getacl( const char *n, char *buf, int length ) { pfs_name pname; int result = -1; if(resolve_name(0,n,&pname,R_OK)) { result = pname.service->getacl(&pname,buf,length); } return result; } int pfs_table::setacl( const char *n, const char *subject, const char *rights ) { pfs_name pname; int result = -1; if(resolve_name(0,n,&pname,W_OK)) { result = pname.service->setacl(&pname,subject,rights); } return result; } int pfs_table::locate( const char *n, char *buf, int length ) { static pfs_location *loc = 0; pfs_name pname; debug(D_SYSCALL, "locating \"%s\"", n); if(n && strlen(n)) { if(loc) delete(loc); loc = 0; if(resolve_name(0, n, &pname, X_OK)) { loc = pname.service->locate(&pname); } } if(loc) { int result = 0; char path[PFS_PATH_MAX]; result = loc->retrieve(path, PFS_PATH_MAX); if(result) { memset(buf, 0, length); strncpy(buf, path, length); return result; } } return 0; } pfs_ssize_t 
pfs_table::copyfile( const char *source, const char *target ) { pfs_name psource, ptarget; pfs_file *sourcefile; pfs_file *targetfile; pfs_stat info; pfs_ssize_t result; if(!pfs_enable_small_file_optimizations) { errno = ENOSYS; return -1; } if(resolve_name(1,source,&psource,R_OK)<0) return -1; if(resolve_name(1,target,&ptarget,W_OK|E_OK)<0) return -1; if(psource.service == ptarget.service) { result = ptarget.service->thirdput(&psource,&ptarget); } else if(psource.service->is_local()) { result = ptarget.service->putfile(&psource,&ptarget); } else if(ptarget.service->is_local()) { result = psource.service->getfile(&psource,&ptarget); } else { result = -1; } if(result<0) { if(errno==ENOSYS || psource.service==ptarget.service) { sourcefile = open_object(source,O_RDONLY,0,0); if(!sourcefile) return -1; result = sourcefile->fstat(&info); if(result<0) { sourcefile->close(); delete sourcefile; return -1; } if(S_ISDIR(info.st_mode)) { sourcefile->close(); delete sourcefile; errno = EISDIR; return -1; } targetfile = open_object(target,O_WRONLY|O_CREAT|O_TRUNC,0777,0); if(!targetfile) { sourcefile->close(); delete sourcefile; return -1; } result = copyfile_slow(sourcefile,targetfile); sourcefile->close(); delete sourcefile; targetfile->close(); delete targetfile; } } return result; } pfs_ssize_t pfs_table::fcopyfile(int sourcefd, int targetfd) { CHECK_FD(sourcefd); CHECK_FD(targetfd); if (copyfile_slow(pointers[sourcefd]->file, pointers[targetfd]->file) > -1) { return 0; } else { errno = ENOTTY; return -1; } } pfs_ssize_t pfs_table::copyfile_slow( pfs_file *sourcefile, pfs_file *targetfile ) { pfs_ssize_t total, ractual, wactual; void *buffer; int buffer_size; buffer_size = MAX(sourcefile->get_block_size(),targetfile->get_block_size()); buffer = malloc(buffer_size); total = 0; while(1) { ractual = sourcefile->read(buffer,buffer_size,total); if(ractual<=0) break; wactual = targetfile->write(buffer,ractual,total); if(wactual!=ractual) break; total += ractual; } free(buffer); if(ractual==0) { return total; } else { return -1; } } int pfs_table::md5( const char *path, unsigned char *digest ) { pfs_name pname; int result; if(!pfs_enable_small_file_optimizations) { errno = ENOSYS; return -1; } if(resolve_name(1,path,&pname,R_OK)<0) return -1; result = pname.service->md5(&pname,digest); if(result<0 && errno==ENOSYS) { result = md5_slow(path,digest); } return result; } int pfs_table::md5_slow( const char *path, unsigned char *digest ) { md5_context_t context; pfs_file *file; unsigned char *buffer; int buffer_size; pfs_off_t total=0; int result; file = open_object(path,O_RDONLY,0,0); if(!file) return -1; buffer_size = file->get_block_size(); buffer = (unsigned char *)malloc(buffer_size); md5_init(&context); while(1) { result = file->read(buffer,buffer_size,total); if(result<=0) break; md5_update(&context,buffer,result); total += result; } file->close(); delete file; free(buffer); if(result==0) { md5_final(digest,&context); return 0; } else { return -1; } } void pfs_table::mmap_proc (pid_t pid, buffer_t *B) { char path[PATH_MAX]; snprintf(path, sizeof(path), "/proc/%d/maps", (int)pid); FILE *maps = fopen(path, "r"); struct pfs_process *p = pfs_process_lookup(pid); if (p) { for(struct pfs_mmap *m = p->table->mmap_list; m; m = m->next) { buffer_putfstring(B, "%016" PRIx64 "-%016" PRIx64, (uint64_t)(uintptr_t)m->logical_addr, (uint64_t)(uintptr_t)m->logical_addr+(uint64_t)m->map_length); buffer_putfstring(B, " "); buffer_putfstring(B, "%c", m->prot & PROT_READ ? 
'r' : '-'); buffer_putfstring(B, "%c", m->prot & PROT_WRITE ? 'w' : '-'); buffer_putfstring(B, "%c", m->prot & PROT_EXEC ? 'x' : '-'); buffer_putfstring(B, "%c", m->flags & MAP_PRIVATE ? 'p' : '-'); buffer_putfstring(B, " "); buffer_putfstring(B, "%16" PRIx64, m->file_offset); buffer_putfstring(B, " "); buffer_putfstring(B, "%02" PRIx32 ":%02" PRIx32, major(m->finfo.st_dev), minor(m->finfo.st_dev)); buffer_putfstring(B, " "); buffer_putfstring(B, "%8" PRIu64, m->finfo.st_ino); buffer_putfstring(B, " "); buffer_putfstring(B, "%s", m->fpath); buffer_putfstring(B, "\n"); } } if (maps) { char line[4096]; while (fgets(line, sizeof(line), maps)) { /* we reformat some entries for consistency */ char *start = NULL, *end = NULL, *perm = NULL, *off = NULL, *dev = NULL, *ino = NULL, *path = NULL; if (pattern_match(line, "^(%x+)%-(%x+)%s+(%S+)%s+(%x+)%s+([%d:]+)%s+(%d+)%s+(.-)%s*$", &start, &end, &perm, &off, &dev, &ino, &path) >= 0) { size_t current = buffer_pos(B); buffer_putfstring(B, "%016" PRIx64 "-%016" PRIx64, (uint64_t)strtoul(start, NULL, 16), (uint64_t)strtoul(end, NULL, 16)); buffer_putfstring(B, " %s", perm); buffer_putfstring(B, " %16" PRIx64, (uint64_t)strtoul(off, NULL, 16)); buffer_putfstring(B, " %s", dev); buffer_putfstring(B, " %8" PRIu64, (uint64_t)strtoul(ino, NULL, 16)); buffer_putfstring(B, " %s", path); buffer_putliteral(B, "\n"); if (pattern_match(path, "%[%w+%]%s*$") >= 0) { /* OKAY: heap/stack/etc. */ } else if (pattern_match(dev, "0+:0+") >= 0) { /* OKAY: anonymous mapping */ } else if (pattern_match(path, ".-parrot%-channel") < 0) { /* OKAY: ! parrot mapping */ } else { /* not printed */ buffer_rewind(B, current); } } free(start); free(end); free(perm); free(off); free(dev); free(ino); free(path); } fclose(maps); } } void pfs_table::mmap_print() { struct pfs_mmap *m; debug(D_CHANNEL,"%12s %8s %8s %8s %4s %4s %s","address","length","foffset", "channel", "prot", "flag", "file"); for(m=mmap_list;m;m=m->next) { debug(D_CHANNEL,"%12llx %8llx %8llx %8llx %4x %4x %s",(long long)m->logical_addr,(long long)m->map_length,(long long)m->file_offset,(long long)m->channel_offset,m->prot,m->flags,m->file->get_name()->path); } } static int load_file_to_channel( pfs_file *file, pfs_size_t length, pfs_size_t start, pfs_size_t blocksize ) { pfs_size_t data_left = length; pfs_size_t offset = 0; pfs_size_t chunk, actual; while(data_left>0) { chunk = MIN(data_left,blocksize); actual = file->read(pfs_channel_base()+start+offset,chunk,offset); if(actual>0) { offset += actual; data_left -= actual; } else if(actual==0) { memset(pfs_channel_base()+start+offset,0,data_left); offset += data_left; data_left = 0; } else { break; } } if(data_left) { debug(D_CHANNEL,"loading: failed: %s",strerror(errno)); return 0; } else { /* we must invalidate the others' mapping of this file, otherwise, they will see old data that was in this place. 
*/ msync(pfs_channel_base()+start,length,MS_INVALIDATE|MS_ASYNC); return 1; } } static int save_file_from_channel( pfs_file *file, pfs_size_t file_offset, pfs_size_t channel_offset, pfs_size_t map_length, pfs_size_t blocksize ) { pfs_size_t data_left = map_length; pfs_size_t chunk, actual; while(data_left>0) { chunk = MIN(data_left,blocksize); actual = file->write(pfs_channel_base()+channel_offset+file_offset,chunk,file_offset); if(actual>0) { file_offset += actual; data_left -= actual; } else { break; } } if(data_left) { debug(D_CHANNEL,"writing: failed: %s",strerror(errno)); return 0; } return 1; } pfs_size_t pfs_table::mmap_create_object( pfs_file *file, pfs_size_t channel_offset, pfs_size_t map_length, pfs_size_t file_offset, int prot, int flags ) { pfs_mmap *m; m = new pfs_mmap(file, 0, channel_offset, map_length, file_offset, prot, flags); m->next = mmap_list; mmap_list = m; return channel_offset; } pfs_size_t pfs_table::mmap_create( int fd, pfs_size_t file_offset, size_t map_length, int prot, int flags ) { pfs_file *file; pfs_size_t channel_offset; pfs_ssize_t file_length; CHECK_FD(fd); if(!(pointers[fd]->flags&(O_WRONLY|O_RDWR|O_APPEND)) && prot&PROT_WRITE && flags&MAP_SHARED) return (errno = EACCES, -1); file = pointers[fd]->file; file_length = file->get_size(); if(file_length<0) return (errno = ENODEV, -1); /* FIXME we don't check the range because it's valid to mmap a file plus extra. However, we don't allocate space in the channel for this! */ //else if(!(file_offset < file_length)) /* beginning of range [off, off+len) */ // return (errno = ENXIO, -1); //else if(!((file_offset+map_length) <= file_length)) /* end of range [off, off+len) */ // return (errno = ENXIO, -1); if(!pfs_channel_lookup(file->get_name()->path,&channel_offset)) { if(!pfs_channel_alloc(file->get_name()->path,file_length,&channel_offset)) return (errno = ENOMEM, -1); debug(D_CHANNEL,"%s loading to channel %llx size %llx",file->get_name()->path,(long long)channel_offset,(long long)file_length); if(!load_file_to_channel(file,file_length,channel_offset,1024*1024)) { pfs_channel_free(channel_offset); return -1; } channel_offset = mmap_create_object(file, channel_offset, map_length, file_offset, prot, flags); /* pfs_channel_alloc adds a ref and so does mmap_create_object, remove the extra: */ pfs_channel_free(channel_offset); return channel_offset; } else { debug(D_CHANNEL,"%s cached at channel %llx",file->get_name()->path,(long long)channel_offset); return mmap_create_object(file, channel_offset, map_length, file_offset, prot, flags); } } int pfs_table::mmap_update( uintptr_t logical_addr, size_t channel_offset ) { if(mmap_list && !mmap_list->logical_addr) { mmap_list->logical_addr = logical_addr; return 0; } debug(D_NOTICE,"warning: mmap logical address (%llx) does not match any map with channel offset (%llx)",(long long)logical_addr,(long long)channel_offset); errno = ENOENT; return -1; } int pfs_table::mmap_delete( uintptr_t logical_addr, size_t length ) { long pgsize = sysconf(_SC_PAGESIZE); uintptr_t s = logical_addr & ~(pgsize-1); /* first page; 0 out lower bits */ uintptr_t e = (logical_addr+length+pgsize-1) & ~(pgsize-1); /* first page NOT IN MAPPING; 0 out lower bits */ debug(D_DEBUG, "munmap(%016" PRIxPTR ", %" PRIxPTR ") --> unmap [%016" PRIxPTR ", %016" PRIxPTR ")", logical_addr, length, s, e); for(pfs_mmap *m = mmap_list, **p = &mmap_list; m; p=&m->next, m=m->next) { if( s >= m->logical_addr && ( s < (m->logical_addr+m->map_length ) ) ) { *p = m->next; // Remove the map from the list. 
// Write back the portion of the file that is mapped in. if(m->flags&MAP_SHARED && m->prot&PROT_WRITE && m->file) { save_file_from_channel(m->file,m->file_offset,m->channel_offset,m->map_length,1024*1024); } /* If we are deleting a mapping that has no logical address, then mmap failed. Don't attempt to split the mapping. */ if (!(s == 0 && length == 0)) { // If there is a fragment left over before the unmap, add it as a new map // This will increase the reference count of both the file and the memory object. if(m->logical_addr < s) { pfs_mmap *newmap = new pfs_mmap(m); newmap->map_length = s - m->logical_addr; newmap->next = *p; *p = newmap; debug(D_DEBUG, "split off memory fragment [%016" PRIxPTR ", %016" PRIxPTR ") size = %zu", newmap->logical_addr, newmap->logical_addr+newmap->map_length, newmap->map_length); } // If there is a fragment left over after the unmap, add it as a new map // This will increase the reference count of both the file and the memory object. if(e < (m->logical_addr+m->map_length)) { pfs_mmap *newmap = new pfs_mmap(m); newmap->logical_addr = e; newmap->map_length -= e - m->logical_addr; newmap->file_offset += e - m->logical_addr; newmap->next = *p; *p = newmap; debug(D_DEBUG, "split off memory fragment [%016" PRIxPTR ", %016" PRIxPTR ") size = %zu", newmap->logical_addr, newmap->logical_addr+newmap->map_length, newmap->map_length); } } delete m; // Delete the mapping, which may also delete the file object and free the channel. return 0; } } /* It is quite common that an munmap will not match any existing mapping. This happens particularly for anonymous mmaps, which are not recorded here. In this case, simply return success; */ return 0; } /* vim: set noexpandtab tabstop=4: */
1
14,635
Tim, please add { } to this 'if'.
cooperative-computing-lab-cctools
c
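The patch for this record is not reproduced in this excerpt, so the exact `if` the review comment points at is not visible here. As a hedged illustration only, the sketch below applies the requested brace style to a representative single-statement `if` taken from pfs_table::read() in the C++ file above; the reviewer's actual target may be a different statement.

/* Before: the single-statement 'if' style used throughout pfs_table.cc. */
if(result>0) pointers[fd]->bump(result);

/* After: the same statement wrapped in the braces the reviewer asks for.
 * With braces, a statement added to this branch later cannot silently
 * land outside the conditional. */
if(result>0) {
	pointers[fd]->bump(result);
}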
@@ -11,8 +11,11 @@ import ( ) const ( - // DefaultPort is used when no port is specified + // DefaultPort is used when no port is specified. DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" ) // Reporter will gather metrics of API requests made and
1
package csm import ( "encoding/json" "net" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) const ( // DefaultPort is used when no port is specified DefaultPort = "31000" ) // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. type Reporter struct { clientID string url string conn net.Conn metricsCh metricChan done chan struct{} } var ( sender *Reporter ) func connect(url string) error { const network = "udp" if err := sender.connect(network, url); err != nil { return err } if sender.done == nil { sender.done = make(chan struct{}) go sender.start() } return nil } func newReporter(clientID, url string) *Reporter { return &Reporter{ clientID: clientID, url: url, metricsCh: newMetricChan(MetricsChannelSize), } } func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { if rep == nil { return } now := time.Now() creds, _ := r.Config.Credentials.Get() m := metric{ ClientID: aws.String(rep.clientID), API: aws.String(r.Operation.Name), Service: aws.String(r.ClientInfo.ServiceID), Timestamp: (*metricTime)(&now), UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), Region: r.Config.Region, Type: aws.String("ApiCallAttempt"), Version: aws.Int(1), XAmzRequestID: aws.String(r.RequestID), AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } if r.HTTPResponse != nil { m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) } if r.Error != nil { if awserr, ok := r.Error.(awserr.Error); ok { m.SetException(getMetricException(awserr)) } } m.TruncateFields() rep.metricsCh.Push(m) } func getMetricException(err awserr.Error) metricException { msg := err.Error() code := err.Code() switch code { case "RequestError", request.ErrCodeSerialization, request.CanceledErrorCode: return sdkException{ requestException{exception: code, message: msg}, } default: return awsException{ requestException{exception: code, message: msg}, } } } func (rep *Reporter) sendAPICallMetric(r *request.Request) { if rep == nil { return } now := time.Now() m := metric{ ClientID: aws.String(rep.clientID), API: aws.String(r.Operation.Name), Service: aws.String(r.ClientInfo.ServiceID), Timestamp: (*metricTime)(&now), UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), Type: aws.String("ApiCall"), AttemptCount: aws.Int(r.RetryCount + 1), Region: r.Config.Region, Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), XAmzRequestID: aws.String(r.RequestID), MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), } if r.HTTPResponse != nil { m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) } if r.Error != nil { if awserr, ok := r.Error.(awserr.Error); ok { m.SetFinalException(getMetricException(awserr)) } } m.TruncateFields() // TODO: Probably want to figure something out for logging dropped // metrics rep.metricsCh.Push(m) } func (rep *Reporter) connect(network, url string) error { if rep.conn != nil { rep.conn.Close() } conn, err := net.Dial(network, url) if err != nil { return awserr.New("UDPError", "Could not connect", err) } rep.conn = conn return nil } func (rep *Reporter) close() { if rep.done != nil { close(rep.done) } rep.metricsCh.Pause() } func (rep *Reporter) start() { defer func() { rep.metricsCh.Pause() }() for { select { case <-rep.done: rep.done = nil return case m := <-rep.metricsCh.ch: // TODO: What to do with this error? 
Probably should just log b, err := json.Marshal(m) if err != nil { continue } rep.conn.Write(b) } } } // Pause will pause the metric channel preventing any new metrics from // being added. func (rep *Reporter) Pause() { lock.Lock() defer lock.Unlock() if rep == nil { return } rep.close() } // Continue will reopen the metric channel and allow for monitoring // to be resumed. func (rep *Reporter) Continue() { lock.Lock() defer lock.Unlock() if rep == nil { return } if !rep.metricsCh.IsPaused() { return } rep.metricsCh.Continue() } // InjectHandlers will will enable client side metrics and inject the proper // handlers to handle how metrics are sent. // // Example: // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { // panic(fmt.Errorf("expected no error, but received %v", err)) // } // // sess := session.NewSession() // r.InjectHandlers(&sess.Handlers) // // // create a new service client with our client side metric session // svc := s3.New(sess) func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { if rep == nil { return } handlers.Complete.PushFrontNamed(request.NamedHandler{ Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric, }) handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric, }) } // boolIntValue return 1 for true and 0 for false. func boolIntValue(b bool) int { if b { return 1 } return 0 }
1
9,765
Suggest moving this to `enable.go`, since that's the only place it's used (see the sketch after this record).
aws-aws-sdk-go
go
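The comment above is a code-organization suggestion; a minimal sketch of what it describes, assuming the flagged declaration is a package-level constant (the name `metricsChannelSize` and the value below are illustrative stand-ins, not the real identifier):

```go
// enable.go (assumed file layout): the declaration lives next to its
// single call site instead of in reporter.go.
package csm

// metricsChannelSize is a hypothetical stand-in for whatever the
// review flagged; the value here is illustrative only.
const metricsChannelSize = 100
```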
@@ -54,7 +54,7 @@ describe "apply" do # Includes agent facts from apply_prep agent_facts = report['resource_statuses']['Notify[agent facts]']['events'][0]['desired_value'].split("\n") - expect(agent_facts[0]).to match(/^\w+\./) + expect(agent_facts[0]).to match(/^\w+/) expect(agent_facts[1]).to eq(agent_facts[0]) expect(agent_facts[2]).to match(/^\d+\.\d+\.\d+$/) expect(agent_facts[3]).to eq(agent_facts[2])
1
# frozen_string_literal: true require 'spec_helper' require 'bolt_spec/conn' require 'bolt_spec/files' require 'bolt_spec/integration' require 'bolt_spec/run' describe "apply" do include BoltSpec::Conn include BoltSpec::Files include BoltSpec::Integration include BoltSpec::Run let(:modulepath) { File.join(__dir__, '../fixtures/apply') } let(:config_flags) { %W[--format json --nodes #{uri} --password #{password} --modulepath #{modulepath}] + tflags } describe 'over ssh', ssh: true do let(:uri) { conn_uri('ssh') } let(:password) { conn_info('ssh')[:password] } let(:tflags) { %W[--no-host-key-check --run-as root --sudo-password #{password}] } def root_config { 'modulepath' => File.join(__dir__, '../fixtures/apply'), 'ssh' => { 'run-as' => 'root', 'sudo-password' => conn_info('ssh')[:password], 'host-key-check' => false } } end after(:all) do # TODO: Extract into test helper if needed in more files uri = conn_uri('ssh') inventory_data = conn_inventory config_data = root_config uninstall = '/opt/puppetlabs/bin/puppet resource package puppet-agent ensure=absent' run_command(uninstall, uri, config: config_data, inventory: inventory_data) end context "when installing puppet" do before(:each) do uninstall = '/opt/puppetlabs/bin/puppet resource package puppet-agent ensure=absent' run_cli_json(%W[command run #{uninstall}] + config_flags) end it 'succeeds when run twice' do result = run_cli_json(%w[plan run prep] + config_flags) expect(result).not_to include('kind') expect(result.count).to eq(1) expect(result[0]['status']).to eq('success') report = result[0]['result']['report'] expect(report['resource_statuses']).to include("Notify[Hello #{conn_info('ssh')[:host]}]") # Includes agent facts from apply_prep agent_facts = report['resource_statuses']['Notify[agent facts]']['events'][0]['desired_value'].split("\n") expect(agent_facts[0]).to match(/^\w+\./) expect(agent_facts[1]).to eq(agent_facts[0]) expect(agent_facts[2]).to match(/^\d+\.\d+\.\d+$/) expect(agent_facts[3]).to eq(agent_facts[2]) expect(agent_facts[4]).to eq('false') result = run_cli_json(%w[plan run prep] + config_flags) expect(result.count).to eq(1) expect(result[0]['status']).to eq('success') report = result[0]['result']['report'] expect(report['resource_statuses']).to include("Notify[Hello #{conn_info('ssh')[:host]}]") end end context "with a puppet_agent installed" do before(:all) do # TODO: Extract into test helper if needed in more files uri = conn_uri('ssh') inventory_data = conn_inventory config_data = root_config run_task('puppet_agent::install', uri, config: config_data, inventory: inventory_data) end it 'errors when there are resource failures' do result = run_cli_json(%w[plan run basic::failure] + config_flags, rescue_exec: true) expect(result).to include('kind' => 'bolt/apply-failure') error = result['details']['result_set'][0]['result']['_error'] expect(error['kind']).to eq('bolt/resource-failure') expect(error['msg']).to match(/Resources failed to apply/) end it 'applies a notify and ignores local settings' do run_command('echo environment=doesnotexist > /etc/puppetlabs/puppet/puppet.conf', uri, config: root_config, inventory: conn_inventory) result = run_cli_json(%w[plan run basic::class] + config_flags) expect(result).not_to include('kind') expect(result[0]).to include('status' => 'success') expect(result[0]['result']['_output']).to eq('changed: 1, failed: 0, unchanged: 0 skipped: 0, noop: 0') resources = result[0]['result']['report']['resource_statuses'] expect(resources).to include('Notify[hello world]') end it 'applies 
the deferred type' do result = run_cli_json(%w[plan run basic::defer] + config_flags) expect(result).not_to include('kind') expect(result[0]['status']).to eq('success') resources = result[0]['result']['report']['resource_statuses'] local_pid = resources['Notify[local pid]']['events'][0]['desired_value'][/(\d+)/, 1] raise 'local pid was not found' if local_pid.nil? remote_pid = resources['Notify[remote pid]']['events'][0]['desired_value'][/(\d+)/, 1] raise 'remote pid was not found' if remote_pid.nil? expect(local_pid).not_to eq(remote_pid) end end end end
1
9,713
This changed when I rebuilt my Docker container. I'm not quite sure why, but it might have to do with what my host network configuration looks like when it's rebuilt.
puppetlabs-bolt
rb
@@ -13,6 +13,7 @@ import ( "github.com/restic/restic/server" ) +// Cache is used to handle the local cache. type Cache struct { base string }
1
package restic import ( "errors" "fmt" "io" "os" "path/filepath" "strings" "github.com/restic/restic/backend" "github.com/restic/restic/debug" "github.com/restic/restic/server" ) type Cache struct { base string } func NewCache(be backend.Identifier) (*Cache, error) { cacheDir, err := getCacheDir() if err != nil { return nil, err } basedir := filepath.Join(cacheDir, be.ID()) debug.Log("Cache.New", "opened cache at %v", basedir) return &Cache{base: basedir}, nil } func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) { filename, err := c.filename(t, subtype, id) if err != nil { return false, err } fd, err := os.Open(filename) defer fd.Close() if err != nil { if os.IsNotExist(err) { debug.Log("Cache.Has", "test for file %v: not cached", filename) return false, nil } debug.Log("Cache.Has", "test for file %v: error %v", filename, err) return false, err } debug.Log("Cache.Has", "test for file %v: is cached", filename) return true, nil } func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) { filename, err := c.filename(t, subtype, id) if err != nil { return nil, err } dirname := filepath.Dir(filename) err = os.MkdirAll(dirname, 0700) if err != nil { return nil, err } file, err := os.Create(filename) if err != nil { debug.Log("Cache.Store", "error creating file %v: %v", filename, err) return nil, err } debug.Log("Cache.Store", "created file %v", filename) return file, nil } func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) { filename, err := c.filename(t, subtype, id) if err != nil { return nil, err } return os.Open(filename) } func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error { filename, err := c.filename(t, subtype, id) if err != nil { return err } err = os.Remove(filename) debug.Log("Cache.purge", "Remove file %v: %v", filename, err) if err != nil && os.IsNotExist(err) { return nil } return err } func (c *Cache) Clear(s *server.Server) error { list, err := c.List(backend.Snapshot) if err != nil { return err } for _, entry := range list { debug.Log("Cache.Clear", "found entry %v", entry) if ok, err := s.Test(backend.Snapshot, entry.ID.String()); !ok || err != nil { debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry) err = c.purge(backend.Snapshot, entry.Subtype, entry.ID) if err != nil { return err } } } return nil } type CacheEntry struct { ID backend.ID Subtype string } func (c CacheEntry) String() string { if c.Subtype != "" { return c.ID.Str() + "." 
+ c.Subtype } return c.ID.Str() } func (c *Cache) List(t backend.Type) ([]CacheEntry, error) { var dir string switch t { case backend.Snapshot: dir = filepath.Join(c.base, "snapshots") default: return nil, fmt.Errorf("cache not supported for type %v", t) } fd, err := os.Open(dir) if err != nil { if os.IsNotExist(err) { return []CacheEntry{}, nil } return nil, err } defer fd.Close() fis, err := fd.Readdir(-1) if err != nil { return nil, err } entries := make([]CacheEntry, 0, len(fis)) for _, fi := range fis { parts := strings.SplitN(fi.Name(), ".", 2) id, err := backend.ParseID(parts[0]) // ignore invalid cache entries for now if err != nil { debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err) continue } e := CacheEntry{ID: id} if len(parts) == 2 { e.Subtype = parts[1] } entries = append(entries, e) } return entries, nil } func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) { filename := id.String() if subtype != "" { filename += "." + subtype } switch t { case backend.Snapshot: return filepath.Join(c.base, "snapshots", filename), nil } return "", fmt.Errorf("cache not supported for type %v", t) } func getCacheDir() (string, error) { if dir := os.Getenv("RESTIC_CACHE"); dir != "" { return dir, nil } return getXDGCacheDir() } // getXDGCacheDir returns the cache directory according to XDG basedir spec, see // http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html func getXDGCacheDir() (string, error) { xdgcache := os.Getenv("XDG_CACHE_HOME") home := os.Getenv("HOME") if xdgcache == "" && home == "" { return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)") } cachedir := "" if xdgcache != "" { cachedir = filepath.Join(xdgcache, "restic") } else if home != "" { cachedir = filepath.Join(home, ".cache", "restic") } fi, err := os.Stat(cachedir) if os.IsNotExist(err) { err = os.MkdirAll(cachedir, 0700) if err != nil { return "", err } fi, err = os.Stat(cachedir) debug.Log("getCacheDir", "create cache dir %v", cachedir) } if err != nil { return "", err } if !fi.IsDir() { return "", fmt.Errorf("cache dir %v is not a directory", cachedir) } return cachedir, nil }
1
6,450
Local cache of what? What's stored in it? (A possible answer is sketched after this record.)
restic-restic
go
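The reviewer's question asks for a more descriptive doc comment; a sketch inferred from the code itself (`List` and `Clear` only handle `backend.Snapshot` entries, and `getCacheDir` honors `$RESTIC_CACHE` before falling back to the XDG cache directory):

```go
package restic

// Cache manages a local on-disk cache of backend objects (currently
// snapshot metadata), stored per backend ID under $RESTIC_CACHE or the
// XDG cache directory.
type Cache struct {
	base string
}
```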
@@ -176,7 +176,7 @@ public class DataFiles { this.keyMetadata = toCopy.keyMetadata() == null ? null : ByteBuffers.copy(toCopy.keyMetadata()); this.splitOffsets = toCopy.splitOffsets() == null ? null : copyList(toCopy.splitOffsets()); - this.sortOrderId = toCopy.sortOrderId(); + this.sortOrderId = toCopy.sortOrderId() == null ? SortOrder.unsorted().orderId() : toCopy.sortOrderId(); return this; }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.nio.ByteBuffer; import java.util.List; import java.util.Locale; import java.util.Map; import org.apache.hadoop.fs.FileStatus; import org.apache.iceberg.encryption.EncryptedOutputFile; import org.apache.iceberg.encryption.EncryptionKeyMetadata; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.types.Conversions; import org.apache.iceberg.util.ByteBuffers; public class DataFiles { private DataFiles() { } static PartitionData newPartitionData(PartitionSpec spec) { return new PartitionData(spec.partitionType()); } static PartitionData copyPartitionData(PartitionSpec spec, StructLike partitionData, PartitionData reuse) { PartitionData data = reuse; if (data == null) { data = newPartitionData(spec); } Class<?>[] javaClasses = spec.javaClasses(); List<PartitionField> fields = spec.fields(); for (int i = 0; i < fields.size(); i += 1) { data.set(i, partitionData.get(i, javaClasses[i])); } return data; } static PartitionData fillFromPath(PartitionSpec spec, String partitionPath, PartitionData reuse) { PartitionData data = reuse; if (data == null) { data = newPartitionData(spec); } String[] partitions = partitionPath.split("/", -1); Preconditions.checkArgument(partitions.length <= spec.fields().size(), "Invalid partition data, too many fields (expecting %s): %s", spec.fields().size(), partitionPath); Preconditions.checkArgument(partitions.length >= spec.fields().size(), "Invalid partition data, not enough fields (expecting %s): %s", spec.fields().size(), partitionPath); for (int i = 0; i < partitions.length; i += 1) { PartitionField field = spec.fields().get(i); String[] parts = partitions[i].split("=", 2); Preconditions.checkArgument( parts.length == 2 && parts[0] != null && field.name().equals(parts[0]), "Invalid partition: %s", partitions[i]); data.set(i, Conversions.fromPartitionString(data.getType(i), parts[1])); } return data; } public static PartitionData data(PartitionSpec spec, String partitionPath) { return fillFromPath(spec, partitionPath, null); } public static PartitionData copy(PartitionSpec spec, StructLike partition) { return copyPartitionData(spec, partition, null); } public static DataFile fromManifest(ManifestFile manifest) { Preconditions.checkArgument( manifest.addedFilesCount() != null && manifest.existingFilesCount() != null, "Cannot create data file from manifest: data file counts are missing."); return DataFiles.builder(PartitionSpec.unpartitioned()) .withPath(manifest.path()) .withFormat(FileFormat.AVRO) .withRecordCount(manifest.addedFilesCount() + 
manifest.existingFilesCount()) .withFileSizeInBytes(manifest.length()) .build(); } public static Builder builder(PartitionSpec spec) { return new Builder(spec); } public static class Builder { private final PartitionSpec spec; private final boolean isPartitioned; private final int specId; private PartitionData partitionData; private String filePath = null; private FileFormat format = null; private long recordCount = -1L; private long fileSizeInBytes = -1L; private int sortOrderId = SortOrder.unsorted().orderId(); // optional fields private Map<Integer, Long> columnSizes = null; private Map<Integer, Long> valueCounts = null; private Map<Integer, Long> nullValueCounts = null; private Map<Integer, Long> nanValueCounts = null; private Map<Integer, ByteBuffer> lowerBounds = null; private Map<Integer, ByteBuffer> upperBounds = null; private ByteBuffer keyMetadata = null; private List<Long> splitOffsets = null; public Builder(PartitionSpec spec) { this.spec = spec; this.specId = spec.specId(); this.isPartitioned = spec.fields().size() > 0; this.partitionData = isPartitioned ? newPartitionData(spec) : null; } public void clear() { if (isPartitioned) { partitionData.clear(); } this.filePath = null; this.format = null; this.recordCount = -1L; this.fileSizeInBytes = -1L; this.columnSizes = null; this.valueCounts = null; this.nullValueCounts = null; this.nanValueCounts = null; this.lowerBounds = null; this.upperBounds = null; this.splitOffsets = null; this.sortOrderId = SortOrder.unsorted().orderId(); } public Builder copy(DataFile toCopy) { if (isPartitioned) { Preconditions.checkState(specId == toCopy.specId(), "Cannot copy a DataFile with a different spec"); this.partitionData = copyPartitionData(spec, toCopy.partition(), partitionData); } this.filePath = toCopy.path().toString(); this.format = toCopy.format(); this.recordCount = toCopy.recordCount(); this.fileSizeInBytes = toCopy.fileSizeInBytes(); this.columnSizes = toCopy.columnSizes(); this.valueCounts = toCopy.valueCounts(); this.nullValueCounts = toCopy.nullValueCounts(); this.nanValueCounts = toCopy.nanValueCounts(); this.lowerBounds = toCopy.lowerBounds(); this.upperBounds = toCopy.upperBounds(); this.keyMetadata = toCopy.keyMetadata() == null ? null : ByteBuffers.copy(toCopy.keyMetadata()); this.splitOffsets = toCopy.splitOffsets() == null ? 
null : copyList(toCopy.splitOffsets()); this.sortOrderId = toCopy.sortOrderId(); return this; } public Builder withStatus(FileStatus stat) { this.filePath = stat.getPath().toString(); this.fileSizeInBytes = stat.getLen(); return this; } public Builder withInputFile(InputFile file) { if (file instanceof HadoopInputFile) { return withStatus(((HadoopInputFile) file).getStat()); } this.filePath = file.location(); this.fileSizeInBytes = file.getLength(); return this; } public Builder withEncryptedOutputFile(EncryptedOutputFile newEncryptedFile) { withInputFile(newEncryptedFile.encryptingOutputFile().toInputFile()); withEncryptionKeyMetadata(newEncryptedFile.keyMetadata()); return this; } public Builder withPath(String newFilePath) { this.filePath = newFilePath; return this; } public Builder withFormat(String newFormat) { this.format = FileFormat.valueOf(newFormat.toUpperCase(Locale.ENGLISH)); return this; } public Builder withFormat(FileFormat newFormat) { this.format = newFormat; return this; } public Builder withPartition(StructLike newPartition) { this.partitionData = copyPartitionData(spec, newPartition, partitionData); return this; } public Builder withRecordCount(long newRecordCount) { this.recordCount = newRecordCount; return this; } public Builder withFileSizeInBytes(long newFileSizeInBytes) { this.fileSizeInBytes = newFileSizeInBytes; return this; } public Builder withPartitionPath(String newPartitionPath) { Preconditions.checkArgument(isPartitioned || newPartitionPath.isEmpty(), "Cannot add partition data for an unpartitioned table"); if (!newPartitionPath.isEmpty()) { this.partitionData = fillFromPath(spec, newPartitionPath, partitionData); } return this; } public Builder withMetrics(Metrics metrics) { // check for null to avoid NPE when unboxing this.recordCount = metrics.recordCount() == null ? -1 : metrics.recordCount(); this.columnSizes = metrics.columnSizes(); this.valueCounts = metrics.valueCounts(); this.nullValueCounts = metrics.nullValueCounts(); this.nanValueCounts = metrics.nanValueCounts(); this.lowerBounds = metrics.lowerBounds(); this.upperBounds = metrics.upperBounds(); return this; } public Builder withSplitOffsets(List<Long> offsets) { if (offsets != null) { this.splitOffsets = copyList(offsets); } else { this.splitOffsets = null; } return this; } public Builder withEncryptionKeyMetadata(ByteBuffer newKeyMetadata) { this.keyMetadata = newKeyMetadata; return this; } public Builder withEncryptionKeyMetadata(EncryptionKeyMetadata newKeyMetadata) { return withEncryptionKeyMetadata(newKeyMetadata.buffer()); } public Builder withSortOrder(SortOrder newSortOrder) { if (newSortOrder != null) { this.sortOrderId = newSortOrder.orderId(); } return this; } public DataFile build() { Preconditions.checkArgument(filePath != null, "File path is required"); if (format == null) { this.format = FileFormat.fromFileName(filePath); } Preconditions.checkArgument(format != null, "File format is required"); Preconditions.checkArgument(fileSizeInBytes >= 0, "File size is required"); Preconditions.checkArgument(recordCount >= 0, "Record count is required"); return new GenericDataFile( specId, filePath, format, isPartitioned ? partitionData.copy() : null, fileSizeInBytes, new Metrics( recordCount, columnSizes, valueCounts, nullValueCounts, nanValueCounts, lowerBounds, upperBounds), keyMetadata, splitOffsets, sortOrderId); } } private static <E> List<E> copyList(List<E> toCopy) { List<E> copy = Lists.newArrayListWithExpectedSize(toCopy.size()); copy.addAll(toCopy); return copy; } }
1
39,517
If the copied `DataFile` returns null, shouldn't the copy also return null? Why not make the builder use `Integer` instead of a primitive here? (See the sketch after this record.)
apache-iceberg
java
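A minimal sketch of the boxed-`Integer` alternative the comment raises: with an `Integer` field, `copy` can preserve a null (unset) sort order from the source instead of substituting a default. The class below is a stripped-down stand-in, not the real `DataFiles.Builder`:

```java
// Stand-in builder illustrating why a boxed Integer preserves "unset".
class SortOrderHolder {
  private Integer sortOrderId; // null means "not specified"

  SortOrderHolder copyFrom(SortOrderHolder other) {
    this.sortOrderId = other.sortOrderId; // a null source stays null
    return this;
  }

  Integer sortOrderId() {
    return sortOrderId; // callers decide how to treat null
  }
}
```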
@@ -2,11 +2,18 @@ import os from jinja2 import Environment, FileSystemLoader +HTML_FILES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_files') + def save_html(filename, context, template): path = os.path.dirname(os.path.abspath(__file__)) template_environment = Environment( loader=FileSystemLoader(os.path.join(path, 'templates'))) - outputfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_files', filename) + outputfile = os.path.join(HTML_FILES_PATH, filename) with open(outputfile, 'w') as f: html = template_environment.get_template(template).render(context) f.write(html) + + +def check_html_files_dir_path(): + dir_exists = os.path.isdir(HTML_FILES_PATH) + return dir_exists
1
import os from jinja2 import Environment, FileSystemLoader def save_html(filename, context, template): path = os.path.dirname(os.path.abspath(__file__)) template_environment = Environment( loader=FileSystemLoader(os.path.join(path, 'templates'))) outputfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_files', filename) with open(outputfile, 'w') as f: html = template_environment.get_template(template).render(context) f.write(html)
1
16,714
You could just do this check in the `save_html` function above, so callers of `save_html` don't have to worry about it (see the sketch after this record).
metabrainz-listenbrainz-server
py
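A sketch of that suggestion, reusing the `HTML_FILES_PATH` constant from the diff: `os.makedirs(..., exist_ok=True)` is idempotent, so the directory check collapses into `save_html` itself and a separate `check_html_files_dir_path` becomes unnecessary:

```python
import os

from jinja2 import Environment, FileSystemLoader

HTML_FILES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_files')


def save_html(filename, context, template):
    # Ensure the output directory exists; callers need no separate check.
    os.makedirs(HTML_FILES_PATH, exist_ok=True)
    path = os.path.dirname(os.path.abspath(__file__))
    template_environment = Environment(
        loader=FileSystemLoader(os.path.join(path, 'templates')))
    outputfile = os.path.join(HTML_FILES_PATH, filename)
    with open(outputfile, 'w') as f:
        f.write(template_environment.get_template(template).render(context))
```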
@@ -214,7 +214,7 @@ func TestTaskRunOpts_Validate(t *testing.T) { Name: "my-app", }, nil) }, - wantedError: errors.New("get environment: couldn't find environment dev in the application my-app"), + wantedError: fmt.Errorf("get environment %s config: couldn't find environment dev in the application my-app", "dev"), }, "no workspace": { basicOpts: defaultOpts,
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "github.com/aws/copilot-cli/internal/pkg/deploy" termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress" "testing" "github.com/aws/copilot-cli/internal/pkg/cli/mocks" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/golang/mock/gomock" "github.com/spf13/afero" "github.com/stretchr/testify/require" ) type basicOpts struct { inCount int inCPU int inMemory int } var defaultOpts = basicOpts{ inCount: 1, inCPU: 256, inMemory: 512, } // NOTE: mock spinner so that it doesn't create log output when testing Execute type mockSpinner struct{} func (s *mockSpinner) Start(label string) {} func (s *mockSpinner) Stop(label string) {} func (s *mockSpinner) Events([]termprogress.TabRow) {} func TestTaskRunOpts_Validate(t *testing.T) { testCases := map[string]struct { basicOpts inName string inImage string inDockerfilePath string inTaskRole string inEnv string inSubnets []string inSecurityGroups []string inEnvVars map[string]string inCommand string appName string mockStore func(m *mocks.Mockstore) mockFileSystem func(mockFS afero.Fs) wantedError error }{ "valid with no flag": { basicOpts: defaultOpts, wantedError: nil, }, "valid with flags image and env": { basicOpts: defaultOpts, inName: "my-task", inImage: "113459295.dkr.ecr.ap-northeast-1.amazonaws.com/my-app", inTaskRole: "exec-role", inEnv: "dev", inEnvVars: map[string]string{ "NAME": "my-app", "ENV": "dev", }, inCommand: "echo hello world", appName: "my-app", mockStore: func(m *mocks.Mockstore) { m.EXPECT().GetApplication("my-app").Return(&config.Application{ Name: "my-app", }, nil) m.EXPECT().GetEnvironment("my-app", "dev").Return(&config.Environment{ App: "my-app", Name: "dev", }, nil) }, wantedError: nil, }, "valid without flags image and env": { basicOpts: defaultOpts, inName: "my-task", inDockerfilePath: "hello/world/Dockerfile", inTaskRole: "exec-role", inSubnets: []string{"subnet-10d938jds"}, inSecurityGroups: []string{"sg-0d9sjdk", "sg-d33kds99"}, inEnvVars: map[string]string{ "NAME": "pj", "ENV": "dev", }, inCommand: "echo hello world", mockFileSystem: func(mockFS afero.Fs) { mockFS.MkdirAll("hello/world", 0755) afero.WriteFile(mockFS, "hello/world/Dockerfile", []byte("FROM nginx"), 0644) }, wantedError: nil, }, "invalid number of tasks": { basicOpts: basicOpts{ inCount: -1, inCPU: 256, inMemory: 512, }, wantedError: errNumNotPositive, }, "invalid number of CPU units": { basicOpts: basicOpts{ inCount: 1, inCPU: -15, inMemory: 512, }, wantedError: errCpuNotPositive, }, "invalid memory": { basicOpts: basicOpts{ inCount: 1, inCPU: 256, inMemory: -1024, }, wantedError: errMemNotPositive, }, "both dockerfile and image name specified": { basicOpts: defaultOpts, inImage: "113459295.dkr.ecr.ap-northeast-1.amazonaws.com/my-app", inDockerfilePath: "hello/world/Dockerfile", wantedError: errors.New("cannot specify both image and Dockerfile path"), }, "invalid dockerfile path": { basicOpts: defaultOpts, inDockerfilePath: "world/hello/Dockerfile", wantedError: errors.New("open world/hello/Dockerfile: file does not exist"), }, "specified app exists": { basicOpts: defaultOpts, appName: "my-app", mockStore: func(m *mocks.Mockstore) { m.EXPECT().GetApplication("my-app").Return(&config.Application{ Name: "my-app", }, nil) }, wantedError: nil, }, "unknown app": { basicOpts: defaultOpts, appName: "my-app", mockStore: func(m *mocks.Mockstore) { m.EXPECT().GetApplication("my-app").Return(nil, 
&config.ErrNoSuchApplication{ ApplicationName: "my-app", AccountID: "115", Region: "us-east-1", }) }, wantedError: fmt.Errorf("get application: couldn't find an application named my-app in account 115 and region us-east-1"), }, "env exists in app": { basicOpts: defaultOpts, appName: "my-app", inEnv: "dev", mockStore: func(m *mocks.Mockstore) { m.EXPECT().GetEnvironment("my-app", "dev").Return(&config.Environment{ App: "my-app", Name: "dev", }, nil) m.EXPECT().GetApplication("my-app").Return(&config.Application{ Name: "my-app", }, nil) }, wantedError: nil, }, "unknown env in app": { basicOpts: defaultOpts, appName: "my-app", inEnv: "dev", mockStore: func(m *mocks.Mockstore) { m.EXPECT().GetEnvironment("my-app", "dev").Return(nil, &config.ErrNoSuchEnvironment{ ApplicationName: "my-app", EnvironmentName: "dev", }) m.EXPECT().GetApplication("my-app").Return(&config.Application{ Name: "my-app", }, nil) }, wantedError: errors.New("get environment: couldn't find environment dev in the application my-app"), }, "no workspace": { basicOpts: defaultOpts, inEnv: "test", wantedError: errNoAppInWorkspace, }, "both environment and subnet id specified": { basicOpts: defaultOpts, inEnv: "test", inSubnets: []string{"subnet id"}, wantedError: errors.New("neither subnet nor security groups should be specified if environment is specified"), }, "both environment and security groups specified": { basicOpts: defaultOpts, inEnv: "test", inSecurityGroups: []string{"security group id1", "securty group id2"}, wantedError: errors.New("neither subnet nor security groups should be specified if environment is specified"), }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockStore := mocks.NewMockstore(ctrl) opts := runTaskOpts{ runTaskVars: runTaskVars{ GlobalOpts: &GlobalOpts{ appName: tc.appName, }, count: tc.inCount, cpu: tc.inCPU, memory: tc.inMemory, groupName: tc.inName, image: tc.inImage, env: tc.inEnv, taskRole: tc.inTaskRole, subnets: tc.inSubnets, securityGroups: tc.inSecurityGroups, dockerfilePath: tc.inDockerfilePath, envVars: tc.inEnvVars, command: tc.inCommand, }, fs: &afero.Afero{Fs: afero.NewMemMapFs()}, store: mockStore, } if tc.mockFileSystem != nil { tc.mockFileSystem(opts.fs) } if tc.mockStore != nil { tc.mockStore(mockStore) } err := opts.Validate() if tc.wantedError != nil { require.EqualError(t, err, tc.wantedError.Error()) } else { require.NoError(t, err) } }) } } func TestTaskRunOpts_Ask(t *testing.T) { testCases := map[string]struct { basicOpts inName string inEnv string appName string mockSel func(m *mocks.MockappEnvSelector) mockPrompt func(m *mocks.Mockprompter) wantedError error wantedEnv string wantedName string }{ "selected an existing environment": { basicOpts: defaultOpts, inName: "my-task", appName: "my-app", mockSel: func(m *mocks.MockappEnvSelector) { m.EXPECT().Environment(fmtTaskRunEnvPrompt, gomock.Any(), "my-app", envOptionNone).Return("test", nil) }, wantedEnv: "test", }, "selected None env": { basicOpts: defaultOpts, inName: "my-task", appName: "my-app", mockSel: func(m *mocks.MockappEnvSelector) { m.EXPECT().Environment(fmtTaskRunEnvPrompt, gomock.Any(), "my-app", envOptionNone).Return(envOptionNone, nil) }, wantedEnv: "", }, "don't prompt if env is provided": { basicOpts: defaultOpts, inName: "my-task", inEnv: "test", appName: "my-app", mockSel: func(m *mocks.MockappEnvSelector) { m.EXPECT().Environment(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) }, wantedEnv: "test", }, "don't 
prompt if no workspace": { basicOpts: defaultOpts, inName: "my-task", mockSel: func(m *mocks.MockappEnvSelector) { m.EXPECT().Environment(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) }, wantedEnv: "", }, "prompt for task family name": { basicOpts: defaultOpts, mockPrompt: func(m *mocks.Mockprompter) { m.EXPECT().Get(fmtTaskRunGroupNamePrompt, gomock.Any(), gomock.Any(), gomock.Any()).Return("my-task", nil) }, wantedName: "my-task", }, "error getting task group name": { mockPrompt: func(m *mocks.Mockprompter) { m.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return("", errors.New("error getting task group name")) }, wantedError: errors.New("prompt get task group name: error getting task group name"), }, "error selecting environment": { basicOpts: defaultOpts, inName: "my-task", appName: "my-app", mockSel: func(m *mocks.MockappEnvSelector) { m.EXPECT().Environment(fmtTaskRunEnvPrompt, gomock.Any(), gomock.Any(), envOptionNone). Return("", fmt.Errorf("error selecting environment")) }, wantedError: errors.New("ask for environment: error selecting environment"), }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockSel := mocks.NewMockappEnvSelector(ctrl) mockPrompter := mocks.NewMockprompter(ctrl) if tc.mockSel != nil { tc.mockSel(mockSel) } if tc.mockPrompt != nil { tc.mockPrompt(mockPrompter) } opts := runTaskOpts{ runTaskVars: runTaskVars{ GlobalOpts: &GlobalOpts{ appName: tc.appName, prompt: mockPrompter, }, count: tc.inCount, cpu: tc.inCPU, memory: tc.inMemory, groupName: tc.inName, env: tc.inEnv, }, sel: mockSel, } err := opts.Ask() if tc.wantedError == nil { require.NoError(t, err) require.Equal(t, tc.wantedEnv, opts.env) if tc.wantedName != "" { require.Equal(t, tc.wantedName, opts.groupName) } } else { require.EqualError(t, tc.wantedError, err.Error()) } }) } } func TestTaskRunOpts_Execute(t *testing.T) { var mockDeployer *mocks.MocktaskDeployer const inGroupName = "my-task" testCases := map[string]struct { inImage string setupMocks func(ctrl *gomock.Controller) wantedError error }{ "error deploying resources": { setupMocks: func(ctrl *gomock.Controller) { mockDeployer = mocks.NewMocktaskDeployer(ctrl) mockDeployer.EXPECT().DeployTask(&deploy.CreateTaskResourcesInput{ Name: inGroupName, Image: "", }).Return(errors.New("error deploying")) }, wantedError: fmt.Errorf("provision resources for task %s: %w", inGroupName, errors.New("error deploying")), }, "error updating resources": { setupMocks: func(ctrl *gomock.Controller) { mockDeployer = mocks.NewMocktaskDeployer(ctrl) mockDeployer.EXPECT().DeployTask(&deploy.CreateTaskResourcesInput{ Name: inGroupName, Image: "", }).Return(nil) mockDeployer.EXPECT().DeployTask(&deploy.CreateTaskResourcesInput{ Name: inGroupName, // TODO: use image.URI() from mockRepository }).Times(1).Return(errors.New("error updating")) }, wantedError: fmt.Errorf("update resources for task %s: %w", inGroupName, errors.New("error updating")), }, "update image to task resource if image is not provided": { setupMocks: func(ctrl *gomock.Controller) { mockDeployer = mocks.NewMocktaskDeployer(ctrl) // TODO: mock repository mockDeployer.EXPECT().DeployTask(&deploy.CreateTaskResourcesInput{ Name: inGroupName, Image: "", }).Times(1).Return(nil) mockDeployer.EXPECT().DeployTask(&deploy.CreateTaskResourcesInput{ Name: inGroupName, // TODO: use image.URI() from mockRepository }).Times(1).Return(nil) }, }, } for name, tc := range testCases { t.Run(name, func(t 
*testing.T) { ctrl := gomock.NewController(t) tc.setupMocks(ctrl) opts := &runTaskOpts{ runTaskVars: runTaskVars{ image: tc.inImage, groupName: inGroupName, }, spinner: &mockSpinner{}, deployer: mockDeployer, } err := opts.Execute() if tc.wantedError != nil { require.EqualError(t, tc.wantedError, err.Error()) } else { require.NoError(t, err) } }) } }
1
14,279
I think I'd prefer not to use a formatted string in unit tests, since we don't pass in any string variables as params (see the sketch after this record).
aws-copilot-cli
go
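A runnable illustration of that point: when every `fmt.Errorf` argument is already a literal, a plain `errors.New` yields the identical message with less indirection:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// The diff's version formats in a value that is a literal anyway.
	formatted := fmt.Errorf("get environment %s config: couldn't find environment dev in the application my-app", "dev")
	// The reviewer's preference: no format verbs when nothing varies.
	plain := errors.New("get environment dev config: couldn't find environment dev in the application my-app")
	fmt.Println(formatted.Error() == plain.Error()) // prints: true
}
```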
@@ -1,4 +1,4 @@ -// +build avr esp nrf sam sifive stm32 k210 nxp +// +build avr esp nrf sam sifive stm32 k210 nxp,!mimxrt1062 package machine
1
// +build avr esp nrf sam sifive stm32 k210 nxp package machine import "errors" var errUARTBufferEmpty = errors.New("UART buffer empty") type UARTConfig struct { BaudRate uint32 TX Pin RX Pin } // To implement the UART interface for a board, you must declare a concrete type as follows: // // type UART struct { // Buffer *RingBuffer // } // // You can also add additional members to this struct depending on your implementation, // but the *RingBuffer is required. // When you are declaring your UARTs for your board, make sure that you also declare the // RingBuffer using the NewRingBuffer() function when you declare your UART: // // UART{Buffer: NewRingBuffer()} // // Read from the RX buffer. func (uart UART) Read(data []byte) (n int, err error) { // check if RX buffer is empty size := uart.Buffered() if size == 0 { return 0, nil } // Make sure we do not read more from buffer than the data slice can hold. if len(data) < size { size = len(data) } // only read number of bytes used from buffer for i := 0; i < size; i++ { v, _ := uart.ReadByte() data[i] = v } return size, nil } // Write data to the UART. func (uart UART) Write(data []byte) (n int, err error) { for _, v := range data { uart.WriteByte(v) } return len(data), nil } // ReadByte reads a single byte from the RX buffer. // If there is no data in the buffer, returns an error. func (uart UART) ReadByte() (byte, error) { // check if RX buffer is empty buf, ok := uart.Buffer.Get() if !ok { return 0, errUARTBufferEmpty } return buf, nil } // Buffered returns the number of bytes currently stored in the RX buffer. func (uart UART) Buffered() int { return int(uart.Buffer.Used()) } // Receive handles adding data to the UART's data buffer. // Usually called by the IRQ handler for a machine. func (uart UART) Receive(data byte) { uart.Buffer.Put(data) }
1
10,873
Maybe it would be better to explicitly include devices instead of explicitly excluding devices? (See the sketch after this record.)
tinygo-org-tinygo
go
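A sketch of the include-list alternative the comment suggests. In Go build tags, space-separated terms are ORed, so an explicit include enumerates the targets that use this file instead of carving one out with `nxp,!mimxrt1062`; the tag `mk66f18` below is an assumed example, not verified against the real target definitions:

```go
// +build avr esp nrf sam sifive stm32 k210 mk66f18

// Hypothetical include-list: each NXP chip that uses this generic UART
// code is named explicitly, so new chips opt in rather than opt out.
package machine
```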
@@ -475,10 +475,10 @@ void ROMol::clearComputedProps(bool includeRings) const { RDProps::clearComputedProps(); - for (ConstAtomIterator atomIt = this->beginAtoms(); - atomIt != this->endAtoms(); ++atomIt) { - (*atomIt)->clearComputedProps(); + for (auto atom: atoms()) { + atom->clearComputedProps(); } + for (ConstBondIterator bondIt = this->beginBonds(); bondIt != this->endBonds(); bondIt++) { (*bondIt)->clearComputedProps();
1
// // Copyright (C) 2003-2015 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <iostream> #include <boost/foreach.hpp> // our stuff #include <RDGeneral/Invariant.h> #include <RDGeneral/RDLog.h> #include "ROMol.h" #include "Atom.h" #include "QueryAtom.h" #include "Bond.h" #include "QueryBond.h" #include "MolPickler.h" #include "Conformer.h" namespace RDKit { class QueryAtom; class QueryBond; const int ci_RIGHTMOST_ATOM = -0xBADBEEF; const int ci_LEADING_BOND = -0xBADBEEF + 1; const int ci_ATOM_HOLDER = -0xDEADD06; void ROMol::destroy() { d_atomBookmarks.clear(); d_bondBookmarks.clear(); d_graph.clear(); if (dp_ringInfo) { delete dp_ringInfo; dp_ringInfo = nullptr; } }; ROMol::ROMol(const std::string &pickle) : RDProps() { initMol(); numBonds = 0; MolPickler::molFromPickle(pickle, *this); numBonds = rdcast<unsigned int>(boost::num_edges(d_graph)); } void ROMol::initFromOther(const ROMol &other, bool quickCopy, int confId) { if (this == &other) return; numBonds = 0; // std::cerr<<" init from other: "<<this<<" "<<&other<<std::endl; // copy over the atoms const MolGraph &oGraph = other.d_graph; ROMol::ATOM_ITER_PAIR atItP = other.getVertices(); while (atItP.first != atItP.second) { addAtom(oGraph[*atItP.first]->copy(), false, true); ++atItP.first; } // and the bonds: ROMol::BOND_ITER_PAIR bondItP = other.getEdges(); while (bondItP.first != bondItP.second) { addBond(oGraph[*(bondItP.first++)]->copy(), true); } // ring information if (dp_ringInfo) delete dp_ringInfo; if (other.dp_ringInfo) { dp_ringInfo = new RingInfo(*(other.dp_ringInfo)); } else { dp_ringInfo = new RingInfo(); } if (!quickCopy) { // copy conformations for (auto ci = other.beginConformers(); ci != other.endConformers(); ++ci) { if (confId < 0 || rdcast<int>((*ci)->getId()) == confId) { auto *conf = new Conformer(*(*ci)); this->addConformer(conf); } } dp_props = other.dp_props; // Bookmarks should be copied as well: BOOST_FOREACH (ATOM_BOOKMARK_MAP::value_type abmI, other.d_atomBookmarks) { BOOST_FOREACH (const Atom *aptr, abmI.second) { setAtomBookmark(getAtomWithIdx(aptr->getIdx()), abmI.first); } } BOOST_FOREACH (BOND_BOOKMARK_MAP::value_type bbmI, other.d_bondBookmarks) { BOOST_FOREACH (const Bond *bptr, bbmI.second) { setBondBookmark(getBondWithIdx(bptr->getIdx()), bbmI.first); } } } else { dp_props.reset(); STR_VECT computed; dp_props.setVal(RDKit::detail::computedPropName, computed); } // std::cerr<<"--------- done init from other: "<<this<<" // "<<&other<<std::endl; } void ROMol::initMol() { dp_props.reset(); dp_ringInfo = new RingInfo(); // ok every molecule contains a property entry called // RDKit::detail::computedPropName // which provides // list of property keys that correspond to value that have been computed // this can used to blow out all computed properties while leaving the rest // along // initialize this list to an empty vector of strings STR_VECT computed; dp_props.setVal(RDKit::detail::computedPropName, computed); } unsigned int ROMol::getAtomDegree(const Atom *at) const { return rdcast<unsigned int>(boost::out_degree(at->getIdx(), d_graph)); }; unsigned int ROMol::getAtomDegree(Atom::ATOM_SPTR at) const { return getAtomDegree(at.get()); }; unsigned int ROMol::getNumAtoms(bool onlyExplicit) const { int res = rdcast<int>(boost::num_vertices(d_graph)); if (!onlyExplicit) { // if we are 
interested in hydrogens as well add them up from // each for (ConstAtomIterator ai = beginAtoms(); ai != endAtoms(); ++ai) { res += (*ai)->getTotalNumHs(); } } return res; }; unsigned int ROMol::getNumHeavyAtoms() const { unsigned int res = 0; for (ConstAtomIterator ai = beginAtoms(); ai != endAtoms(); ++ai) { if ((*ai)->getAtomicNum() > 1) ++res; } return res; }; Atom *ROMol::getAtomWithIdx(unsigned int idx) { PRECONDITION(getNumAtoms() > 0, "no atoms"); URANGE_CHECK(idx, getNumAtoms()); MolGraph::vertex_descriptor vd = boost::vertex(idx, d_graph); Atom *res = d_graph[vd].get(); POSTCONDITION(res, ""); return res; } const Atom *ROMol::getAtomWithIdx(unsigned int idx) const { PRECONDITION(getNumAtoms() > 0, "no atoms"); URANGE_CHECK(idx, getNumAtoms()); MolGraph::vertex_descriptor vd = boost::vertex(idx, d_graph); const Atom *res = d_graph[vd].get(); POSTCONDITION(res, ""); return res; } // returns the first inserted atom with the given bookmark Atom *ROMol::getAtomWithBookmark(const int mark) { PRECONDITION(d_atomBookmarks.count(mark) != 0, "atom bookmark not found"); PRECONDITION(d_atomBookmarks[mark].begin() != d_atomBookmarks[mark].end(), "atom bookmark not found"); return *(d_atomBookmarks[mark].begin()); }; // returns all atoms with the given bookmark ROMol::ATOM_PTR_LIST &ROMol::getAllAtomsWithBookmark(const int mark) { PRECONDITION(d_atomBookmarks.count(mark) != 0, "atom bookmark not found"); return d_atomBookmarks[mark]; }; // returns the first inserted bond with the given bookmark Bond *ROMol::getBondWithBookmark(const int mark) { PRECONDITION(d_bondBookmarks.count(mark) != 0, "bond bookmark not found"); PRECONDITION(d_bondBookmarks[mark].begin() != d_bondBookmarks[mark].end(), "bond bookmark not found"); return *(d_bondBookmarks[mark].begin()); }; // returns all bonds with the given bookmark ROMol::BOND_PTR_LIST &ROMol::getAllBondsWithBookmark(const int mark) { PRECONDITION(d_bondBookmarks.count(mark) != 0, "bond bookmark not found"); return d_bondBookmarks[mark]; }; void ROMol::clearAtomBookmark(const int mark) { d_atomBookmarks.erase(mark); } void ROMol::clearAtomBookmark(const int mark, const Atom *atom) { if (d_atomBookmarks.count(mark) != 0) { ATOM_PTR_LIST *entry = &d_atomBookmarks[mark]; unsigned int tgtIdx = atom->getIdx(); for (auto i = entry->begin(); i != entry->end(); ++i) { if ((*i)->getIdx() == tgtIdx) { entry->erase(i); break; } } if (entry->begin() == entry->end()) { d_atomBookmarks.erase(mark); } } } void ROMol::clearBondBookmark(const int mark) { d_bondBookmarks.erase(mark); } void ROMol::clearBondBookmark(const int mark, const Bond *bond) { if (d_bondBookmarks.count(mark) != 0) { BOND_PTR_LIST *entry = &d_bondBookmarks[mark]; unsigned int tgtIdx = bond->getIdx(); for (auto i = entry->begin(); i != entry->end(); ++i) { if ((*i)->getIdx() == tgtIdx) { entry->erase(i); break; } } if (entry->begin() == entry->end()) { d_bondBookmarks.erase(mark); } } } unsigned int ROMol::getNumBonds(bool onlyHeavy) const { // By default resturn the bonds that connect only the heavy atoms // hydrogen connecting bonds are ignores int res = rdcast<int>(boost::num_edges(d_graph)); if (!onlyHeavy) { // If we need hydrogen connecting bonds add them up for (ConstAtomIterator ai = beginAtoms(); ai != endAtoms(); ++ai) { res += (*ai)->getTotalNumHs(); } } return res; } Bond *ROMol::getBondWithIdx(unsigned int idx) { PRECONDITION(getNumBonds() > 0, "no bonds"); URANGE_CHECK(idx, getNumBonds()); BOND_ITER_PAIR bIter = getEdges(); for (unsigned int i = 0; i < idx; i++) ++bIter.first; Bond 
*res = d_graph[*(bIter.first)].get(); POSTCONDITION(res != nullptr, "Invalid bond requested"); return res; } const Bond *ROMol::getBondWithIdx(unsigned int idx) const { PRECONDITION(getNumBonds() > 0, "no bonds"); URANGE_CHECK(idx, getNumBonds()); BOND_ITER_PAIR bIter = getEdges(); for (unsigned int i = 0; i < idx; i++) ++bIter.first; const Bond *res = d_graph[*(bIter.first)].get(); POSTCONDITION(res != nullptr, "Invalid bond requested"); return res; } Bond *ROMol::getBondBetweenAtoms(unsigned int idx1, unsigned int idx2) { URANGE_CHECK(idx1, getNumAtoms()); URANGE_CHECK(idx2, getNumAtoms()); Bond *res = nullptr; MolGraph::edge_descriptor edge; bool found; boost::tie(edge, found) = boost::edge(boost::vertex(idx1, d_graph), boost::vertex(idx2, d_graph), d_graph); if (found) { res = d_graph[edge].get(); } return res; } const Bond *ROMol::getBondBetweenAtoms(unsigned int idx1, unsigned int idx2) const { URANGE_CHECK(idx1, getNumAtoms()); URANGE_CHECK(idx2, getNumAtoms()); const Bond *res = nullptr; MolGraph::edge_descriptor edge; bool found; boost::tie(edge, found) = boost::edge(boost::vertex(idx1, d_graph), boost::vertex(idx2, d_graph), d_graph); if (found) { res = d_graph[edge].get(); } return res; } ROMol::ADJ_ITER_PAIR ROMol::getAtomNeighbors(Atom const *at) const { return boost::adjacent_vertices(at->getIdx(), d_graph); }; ROMol::ADJ_ITER_PAIR ROMol::getAtomNeighbors(Atom::ATOM_SPTR at) const { return boost::adjacent_vertices(at->getIdx(), d_graph); }; ROMol::OBOND_ITER_PAIR ROMol::getAtomBonds(Atom const *at) const { return boost::out_edges(at->getIdx(), d_graph); } ROMol::ATOM_ITER_PAIR ROMol::getVertices() { return boost::vertices(d_graph); } ROMol::BOND_ITER_PAIR ROMol::getEdges() { return boost::edges(d_graph); } ROMol::ATOM_ITER_PAIR ROMol::getVertices() const { return boost::vertices(d_graph); } ROMol::BOND_ITER_PAIR ROMol::getEdges() const { return boost::edges(d_graph); } unsigned int ROMol::addAtom(Atom *atom_pin, bool updateLabel, bool takeOwnership) { PRECONDITION(atom_pin, "null atom passed in"); Atom *atom_p; if (!takeOwnership) atom_p = atom_pin->copy(); else atom_p = atom_pin; atom_p->setOwningMol(this); MolGraph::vertex_descriptor which = boost::add_vertex(d_graph); d_graph[which].reset(atom_p); atom_p->setIdx(which); if (updateLabel) { replaceAtomBookmark(atom_p, ci_RIGHTMOST_ATOM); } for (auto cfi = this->beginConformers(); cfi != this->endConformers(); ++cfi) { (*cfi)->setAtomPos(which, RDGeom::Point3D(0.0, 0.0, 0.0)); } return rdcast<unsigned int>(which); }; unsigned int ROMol::addAtom(Atom::ATOM_SPTR atom_sp, bool updateLabel) { return addAtom(atom_sp.get(), updateLabel, false); } unsigned int ROMol::addBond(Bond *bond_pin, bool takeOwnership) { PRECONDITION(bond_pin, "null bond passed in"); URANGE_CHECK(bond_pin->getBeginAtomIdx(), getNumAtoms()); URANGE_CHECK(bond_pin->getEndAtomIdx(), getNumAtoms()); PRECONDITION(bond_pin->getBeginAtomIdx() != bond_pin->getEndAtomIdx(), "attempt to add self-bond"); PRECONDITION(!(boost::edge(bond_pin->getBeginAtomIdx(), bond_pin->getEndAtomIdx(), d_graph) .second), "bond already exists"); Bond *bond_p; if (!takeOwnership) bond_p = bond_pin->copy(); else bond_p = bond_pin; bond_p->setOwningMol(this); bool ok; MolGraph::edge_descriptor which; boost::tie(which, ok) = boost::add_edge(bond_p->getBeginAtomIdx(), bond_p->getEndAtomIdx(), d_graph); CHECK_INVARIANT(ok, "bond could not be added"); d_graph[which].reset(bond_p); numBonds++; // int res = rdcast<int>(boost::num_edges(d_graph)); bond_p->setIdx(numBonds - 1); return numBonds; // 
res; } unsigned int ROMol::addBond(Bond::BOND_SPTR bsp) { return addBond(bsp.get()); } void ROMol::debugMol(std::ostream &str) const { ATOM_ITER_PAIR atItP = getVertices(); BOND_ITER_PAIR bondItP = getEdges(); str << "Atoms:" << std::endl; while (atItP.first != atItP.second) { str << "\t" << *d_graph[*(atItP.first++)].get() << std::endl; } str << "Bonds:" << std::endl; while (bondItP.first != bondItP.second) { str << "\t" << *d_graph[*(bondItP.first++)].get() << std::endl; } } // -------------------------------------------- // // Iterators // // -------------------------------------------- ROMol::AtomIterator ROMol::beginAtoms() { return AtomIterator(this); } ROMol::ConstAtomIterator ROMol::beginAtoms() const { return ConstAtomIterator(this); } ROMol::AtomIterator ROMol::endAtoms() { return AtomIterator(this, getNumAtoms()); } ROMol::ConstAtomIterator ROMol::endAtoms() const { return ConstAtomIterator(this, getNumAtoms()); } ROMol::AromaticAtomIterator ROMol::beginAromaticAtoms() { return AromaticAtomIterator(this); } ROMol::ConstAromaticAtomIterator ROMol::beginAromaticAtoms() const { return ConstAromaticAtomIterator(this); } ROMol::AromaticAtomIterator ROMol::endAromaticAtoms() { return AromaticAtomIterator(this, getNumAtoms()); } ROMol::ConstAromaticAtomIterator ROMol::endAromaticAtoms() const { return ConstAromaticAtomIterator(this, getNumAtoms()); } ROMol::HeteroatomIterator ROMol::beginHeteros() { return HeteroatomIterator(this); } ROMol::ConstHeteroatomIterator ROMol::beginHeteros() const { return ConstHeteroatomIterator(this); } ROMol::HeteroatomIterator ROMol::endHeteros() { return HeteroatomIterator(this, getNumAtoms()); } ROMol::ConstHeteroatomIterator ROMol::endHeteros() const { return ConstHeteroatomIterator(this, getNumAtoms()); } ROMol::QueryAtomIterator ROMol::beginQueryAtoms(QueryAtom const *what) { return QueryAtomIterator(this, what); } ROMol::ConstQueryAtomIterator ROMol::beginQueryAtoms( QueryAtom const *what) const { return ConstQueryAtomIterator(this, what); } ROMol::QueryAtomIterator ROMol::endQueryAtoms() { return QueryAtomIterator(this, getNumAtoms()); } ROMol::ConstQueryAtomIterator ROMol::endQueryAtoms() const { return ConstQueryAtomIterator(this, getNumAtoms()); } ROMol::MatchingAtomIterator ROMol::beginMatchingAtoms(bool (*what)(Atom *)) { return MatchingAtomIterator(this, what); } ROMol::ConstMatchingAtomIterator ROMol::beginMatchingAtoms( bool (*what)(const Atom *)) const { return ConstMatchingAtomIterator(this, what); } ROMol::MatchingAtomIterator ROMol::endMatchingAtoms() { return MatchingAtomIterator(this, getNumAtoms()); } ROMol::ConstMatchingAtomIterator ROMol::endMatchingAtoms() const { return ConstMatchingAtomIterator(this, getNumAtoms()); } ROMol::BondIterator ROMol::beginBonds() { return BondIterator(this); } ROMol::ConstBondIterator ROMol::beginBonds() const { return ConstBondIterator(this); } ROMol::BondIterator ROMol::endBonds() { EDGE_ITER beg, end; boost::tie(beg, end) = getEdges(); return BondIterator(this, end); } ROMol::ConstBondIterator ROMol::endBonds() const { EDGE_ITER beg, end; boost::tie(beg, end) = getEdges(); return ConstBondIterator(this, end); } void ROMol::clearComputedProps(bool includeRings) const { // the SSSR information: if (includeRings) this->dp_ringInfo->reset(); RDProps::clearComputedProps(); for (ConstAtomIterator atomIt = this->beginAtoms(); atomIt != this->endAtoms(); ++atomIt) { (*atomIt)->clearComputedProps(); } for (ConstBondIterator bondIt = this->beginBonds(); bondIt != this->endBonds(); bondIt++) { 
(*bondIt)->clearComputedProps(); } } void ROMol::updatePropertyCache(bool strict) { for (AtomIterator atomIt = this->beginAtoms(); atomIt != this->endAtoms(); ++atomIt) { (*atomIt)->updatePropertyCache(strict); } for (BondIterator bondIt = this->beginBonds(); bondIt != this->endBonds(); ++bondIt) { (*bondIt)->updatePropertyCache(strict); } } bool ROMol::needsUpdatePropertyCache() const { for (ConstAtomIterator atomIt = this->beginAtoms(); atomIt != this->endAtoms(); ++atomIt) { if ((*atomIt)->needsUpdatePropertyCache()) { return true; } } // there is no test for bonds yet since they do not obtain a valence property return false; } const Conformer &ROMol::getConformer(int id) const { // make sure we have more than one conformation if (d_confs.size() == 0) { throw ConformerException("No conformations available on the molecule"); } if (id < 0) { return *(d_confs.front()); } unsigned int cid = (unsigned int)id; for (auto ci = this->beginConformers(); ci != this->endConformers(); ++ci) { if ((*ci)->getId() == cid) { return *(*ci); } } // we did not find a coformation with the specified ID std::string mesg = "Can't find conformation with ID: "; mesg += id; throw ConformerException(mesg); } Conformer &ROMol::getConformer(int id) { // make sure we have more than one conformation if (d_confs.size() == 0) { throw ConformerException("No conformations available on the molecule"); } if (id < 0) { return *(d_confs.front()); } unsigned int cid = (unsigned int)id; for (auto ci = this->beginConformers(); ci != this->endConformers(); ++ci) { if ((*ci)->getId() == cid) { return *(*ci); } } // we did not find a coformation with the specified ID std::string mesg = "Can't find conformation with ID: "; mesg += id; throw ConformerException(mesg); } void ROMol::removeConformer(unsigned int id) { for (auto ci = d_confs.begin(); ci != d_confs.end(); ++ci) { if ((*ci)->getId() == id) { d_confs.erase(ci); return; } } } unsigned int ROMol::addConformer(Conformer *conf, bool assignId) { PRECONDITION(conf->getNumAtoms() == this->getNumAtoms(), "Number of atom mismatch"); if (assignId) { int maxId = -1; BOOST_FOREACH (CONFORMER_SPTR cptr, d_confs) { maxId = std::max((int)(cptr->getId()), maxId); } maxId++; conf->setId((unsigned int)maxId); } conf->setOwningMol(this); CONFORMER_SPTR nConf(conf); d_confs.push_back(nConf); return conf->getId(); } } // end o' namespace
1
17,910
This could be `for (auto bond: bonds()){`, right? (See the sketch after this record.)
rdkit-rdkit
cpp
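The one-liner the reviewer has in mind, mirroring the `atoms()` loop already in the patch; this assumes `ROMol` exposes a `bonds()` range the same way it exposes `atoms()` (a method-body fragment, not a standalone program):

```cpp
// Same shape as the atoms() loop from the patch (bonds() is assumed).
for (auto bond : bonds()) {
  bond->clearComputedProps();
}
```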
@@ -78,10 +78,13 @@ class LibraryCardsController extends AbstractBase // Connect to the ILS for login drivers: $catalog = $this->getILS(); + $config = $this->getConfig(); return $this->createViewModel( [ 'libraryCards' => $user->getLibraryCards(), - 'multipleTargets' => $catalog->checkCapability('getLoginDrivers') + 'multipleTargets' => $catalog->checkCapability('getLoginDrivers'), + 'allowConnectingCards' => $this->getAuthManager() + ->supportsConnectingLibraryCard(), ] ); }
1
<?php /** * LibraryCards Controller * * PHP version 7 * * Copyright (C) Villanova University 2010. * Copyright (C) The National Library of Finland 2015-2019. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package Controller * @author Demian Katz <[email protected]> * @author Ere Maijala <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org Main Site */ namespace VuFind\Controller; use VuFind\Exception\ILS as ILSException; /** * Controller for the library card functionality. * * @category VuFind * @package Controller * @author Demian Katz <[email protected]> * @author Ere Maijala <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org Main Site */ class LibraryCardsController extends AbstractBase { /** * Send user's library cards to the view * * @return mixed */ public function homeAction() { if (!($user = $this->getUser())) { return $this->forceLogin(); } // Check for "delete card" request; parameter may be in GET or POST depending // on calling context. $deleteId = $this->params()->fromPost( 'delete', $this->params()->fromQuery('delete') ); if ($deleteId) { // If the user already confirmed the operation, perform the delete now; // otherwise prompt for confirmation: $confirm = $this->params()->fromPost( 'confirm', $this->params()->fromQuery('confirm') ); if ($confirm) { $success = $this->performDeleteLibraryCard($deleteId); if ($success !== true) { return $success; } } else { return $this->confirmDeleteLibraryCard($deleteId); } } // Connect to the ILS for login drivers: $catalog = $this->getILS(); return $this->createViewModel( [ 'libraryCards' => $user->getLibraryCards(), 'multipleTargets' => $catalog->checkCapability('getLoginDrivers') ] ); } /** * Send user's library card to the edit view * * @return mixed */ public function editCardAction() { // User must be logged in to edit library cards: $user = $this->getUser(); if ($user == false) { return $this->forceLogin(); } // Process email authentication: if ($this->params()->fromQuery('auth_method') === 'Email' && ($hash = $this->params()->fromQuery('hash')) ) { return $this->processEmailLink($user, $hash); } // Process form submission: if ($this->formWasSubmitted('submit')) { if ($redirect = $this->processEditLibraryCard($user)) { return $redirect; } } $id = $this->params()->fromRoute('id', $this->params()->fromQuery('id')); $card = $user->getLibraryCard($id == 'NEW' ? 
null : $id); $target = null; $username = $card->cat_username; $loginSettings = $this->getILSLoginSettings(); // Split target and username if multiple login targets are available: if ($loginSettings['targets'] && strstr($username, '.')) { list($target, $username) = explode('.', $username, 2); } $cardName = $this->params()->fromPost('card_name', $card->card_name); $username = $this->params()->fromPost('username', $username); $target = $this->params()->fromPost('target', $target); // Send the card to the view: return $this->createViewModel( [ 'card' => $card, 'cardName' => $cardName, 'target' => $target ?: $loginSettings['defaultTarget'], 'username' => $username, 'targets' => $loginSettings['targets'], 'defaultTarget' => $loginSettings['defaultTarget'], 'loginMethod' => $loginSettings['loginMethod'], 'loginMethods' => $loginSettings['loginMethods'], ] ); } /** * Creates a confirmation box to delete or not delete the current list * * @return mixed */ public function deleteCardAction() { // User must be logged in to edit library cards: $user = $this->getUser(); if ($user == false) { return $this->forceLogin(); } // Get requested library card ID: $cardID = $this->params() ->fromPost('cardID', $this->params()->fromQuery('cardID')); // Have we confirmed this? $confirm = $this->params()->fromPost( 'confirm', $this->params()->fromQuery('confirm') ); if ($confirm) { $user->deleteLibraryCard($cardID); // Success Message $this->flashMessenger()->addMessage('Library Card Deleted', 'success'); // Redirect to MyResearch library cards return $this->redirect()->toRoute('librarycards-home'); } // If we got this far, we must display a confirmation message: return $this->confirm( 'confirm_delete_library_card_brief', $this->url()->fromRoute('librarycards-deletecard'), $this->url()->fromRoute('librarycards-home'), 'confirm_delete_library_card_text', ['cardID' => $cardID] ); } /** * When redirecting after selecting a library card, adjust the URL to make * sure it will work correctly. * * @param string $url URL to adjust * * @return string */ protected function adjustCardRedirectUrl($url) { // If there is pagination in the URL, reset it to page 1, since the // new card may have a different number of pages of data: return preg_replace('/([&?]page)=[0-9]+/', '$1=1', $url); } /** * Activates a library card * * @return \Laminas\Http\Response */ public function selectCardAction() { $user = $this->getUser(); if ($user == false) { return $this->forceLogin(); } $cardID = $this->params()->fromQuery('cardID'); if (null === $cardID) { return $this->redirect()->toRoute('myresearch-home'); } $user->activateLibraryCard($cardID); // Connect to the ILS and check that the credentials are correct: try { $catalog = $this->getILS(); $patron = $catalog->patronLogin( $user->cat_username, $user->getCatPassword() ); if (!$patron) { $this->flashMessenger() ->addMessage('authentication_error_invalid', 'error'); } } catch (ILSException $e) { $this->flashMessenger() ->addMessage('authentication_error_technical', 'error'); } $this->setFollowupUrlToReferer(); if ($url = $this->getFollowupUrl()) { $this->clearFollowupUrl(); return $this->redirect()->toUrl($this->adjustCardRedirectUrl($url)); } return $this->redirect()->toRoute('myresearch-home'); } /** * Process the "edit library card" submission. * * @param \VuFind\Db\Row\User $user Logged in user * * @return object|bool Response object if redirect is * needed, false if form needs to be redisplayed. 
*/ protected function processEditLibraryCard($user) { $cardName = $this->params()->fromPost('card_name', ''); $target = $this->params()->fromPost('target', ''); $username = $this->params()->fromPost('username', ''); $password = $this->params()->fromPost('password', ''); $id = $this->params()->fromRoute('id', $this->params()->fromQuery('id')); if (!$username) { $this->flashMessenger() ->addMessage('authentication_error_blank', 'error'); return false; } if ($target) { $username = "$target.$username"; } // Check the credentials if the username is changed or a new password is // entered: $card = $user->getLibraryCard($id == 'NEW' ? null : $id); if ($card->cat_username !== $username || trim($password)) { // Connect to the ILS and check that the credentials are correct: $loginMethod = $this->getILSLoginMethod($target); $catalog = $this->getILS(); try { $patron = $catalog->patronLogin($username, $password); } catch (ILSException $e) { $this->flashMessenger()->addErrorMessage('ils_connection_failed'); return false; } if ('password' === $loginMethod && !$patron) { $this->flashMessenger() ->addMessage('authentication_error_invalid', 'error'); return false; } if ('email' === $loginMethod) { if ($patron) { $info = $patron; $info['cardID'] = $id; $info['cardName'] = $cardName; $emailAuthenticator = $this->serviceLocator ->get(\VuFind\Auth\EmailAuthenticator::class); $emailAuthenticator->sendAuthenticationLink( $info['email'], $info, ['auth_method' => 'Email'], 'editLibraryCard' ); } // Don't reveal the result $this->flashMessenger()->addSuccessMessage('email_login_link_sent'); return $this->redirect()->toRoute('librarycards-home'); } } try { $user->saveLibraryCard( $id == 'NEW' ? null : $id, $cardName, $username, $password ); } catch (\VuFind\Exception\LibraryCard $e) { $this->flashMessenger()->addMessage($e->getMessage(), 'error'); return false; } return $this->redirect()->toRoute('librarycards-home'); } /** * Process library card addition via an email link * * @param User $user User object * @param string $hash Hash * * @return \Laminas\Http\Response Response object */ protected function processEmailLink($user, $hash) { $emailAuthenticator = $this->serviceLocator ->get(\VuFind\Auth\EmailAuthenticator::class); try { $info = $emailAuthenticator->authenticate($hash); $user->saveLibraryCard( 'NEW' === $info['cardID'] ? null : $info['cardID'], $info['cardName'], $info['cat_username'], ' ' ); } catch (\VuFind\Exception\Auth $e) { $this->flashMessenger()->addErrorMessage($e->getMessage()); } catch (\VuFind\Exception\LibraryCard $e) { $this->flashMessenger()->addErrorMessage($e->getMessage()); } return $this->redirect()->toRoute('librarycards-home'); } }
1
30,739
Is $config no longer being used? Can we remove this line?
vufind-org-vufind
php
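If the review comment on this record is right and $config is never read after being assigned, the follow-up change is simply to drop that line. A sketch of such a follow-up hunk, written in the same unified-diff form as the record's patch field (assuming homeAction gained no other use of $config):

@@ -78,7 +78,6 @@ class LibraryCardsController extends AbstractBase
 
         // Connect to the ILS for login drivers:
         $catalog = $this->getILS();
-        $config = $this->getConfig();
         return $this->createViewModel(
             [
                 'libraryCards' => $user->getLibraryCards(),

Removing the dead local keeps the controller tidy; if the config lookup was added in anticipation of a later change, that intent would be worth stating in the review thread instead.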
@@ -179,7 +179,7 @@ char* get_file_name(char* filename)
 // finished with it.
 // If you pass in NULL or the new string can't be allocated,
 // it returns NULL.
-char* remove_ext(const char* path, char dot, char sep, size_t* allocated_size)
+char* remove_ext(const char* path, char dot, char sep, size_t* allocated_size) 
 {
   char *retstr, *lastdot, *lastsep;
   // Error checks and allocate string.
1
#include <platform.h> #include "../../libponyrt/mem/pool.h" #include <string.h> #include <stdio.h> #if defined(PLATFORM_IS_LINUX) #include <unistd.h> #elif defined(PLATFORM_IS_MACOSX) #include <mach-o/dyld.h> #elif defined(PLATFORM_IS_BSD) #include <unistd.h> #include <sys/types.h> #include <sys/sysctl.h> #include <sys/stat.h> #endif #ifdef PLATFORM_IS_WINDOWS # define PATH_SLASH '\\' #else # define PATH_SLASH '/' #endif PONY_DIR* pony_opendir(const char* path, PONY_ERRNO* err) { #ifdef PLATFORM_IS_WINDOWS size_t path_len = strlen(path); if(path_len > (MAX_PATH - 3)) { *err = PONY_IO_PATH_TOO_LONG; return NULL; } TCHAR win_path[MAX_PATH]; strcpy(win_path, path); strcat(win_path, "\\*"); PONY_DIR* dir = POOL_ALLOC(PONY_DIR); dir->ptr = FindFirstFile(win_path, &dir->info); if(dir->ptr == INVALID_HANDLE_VALUE) { *err = GetLastError(); FindClose(dir->ptr); POOL_FREE(PONY_DIR, dir); return NULL; } return dir; #elif defined(PLATFORM_IS_POSIX_BASED) PONY_DIR* dir = opendir(path); if(dir == NULL) { *err = errno; return NULL; } return dir; #else return NULL; #endif } char* pony_realpath(const char* path, char* resolved) { #ifdef PLATFORM_IS_WINDOWS if(GetFullPathName(path, FILENAME_MAX, resolved, NULL) == 0 || GetFileAttributes(resolved) == INVALID_FILE_ATTRIBUTES) return NULL; // Strip any trailing backslashes for(size_t len = strlen(resolved); resolved[len - 1] == '\\'; --len) resolved[len - 1] = '\0'; return resolved; #elif defined(PLATFORM_IS_POSIX_BASED) return realpath(path, resolved); #endif } char* pony_dir_info_name(PONY_DIRINFO* info) { #ifdef PLATFORM_IS_WINDOWS return info->cFileName; #elif defined(PLATFORM_IS_POSIX_BASED) return info->d_name; #endif } void pony_closedir(PONY_DIR* dir) { #ifdef PLATFORM_IS_WINDOWS FindClose(dir->ptr); POOL_FREE(PONY_DIR, dir); #elif defined(PLATFORM_IS_POSIX_BASED) closedir(dir); #endif } PONY_DIRINFO* pony_dir_entry_next(PONY_DIR* dir) { #ifdef PLATFORM_IS_WINDOWS if(FindNextFile(dir->ptr, &dir->info) != 0) return &dir->info; return NULL; #elif defined(PLATFORM_IS_POSIX_BASED) return readdir(dir); #else return NULL; #endif } void pony_mkdir(const char* path) { // Copy the given path into a new buffer, one directory at a time, creating // each as we go size_t path_len = strlen(path); char* buf = (char*)ponyint_pool_alloc_size(path_len + 1); // +1 for terminator for(size_t i = 0; i < path_len; i++) { buf[i] = path[i]; if(path[i] == '/' #ifdef PLATFORM_IS_WINDOWS || path[i] == '\\' #endif ) { // Create an intermediate dir buf[i + 1] = '\0'; #ifdef PLATFORM_IS_WINDOWS CreateDirectory(buf, NULL); #else mkdir(buf, 0777); #endif } } // Create final directory #ifdef PLATFORM_IS_WINDOWS CreateDirectory(path, NULL); #else mkdir(path, 0777); #endif ponyint_pool_free_size(path_len + 1, buf); } #ifdef PLATFORM_IS_WINDOWS # include <shlwapi.h> # pragma comment(lib, "shlwapi.lib") #else # include <libgen.h> #endif char* get_file_name(char* filename) { #ifdef PLATFORM_IS_WINDOWS PathStripPath((LPSTR) filename); return filename; #else return basename(filename); #endif } // https://stackoverflow.com/questions/2736753/how-to-remove-extension-from-file-name // remove_ext: removes the "extension" from a file spec. // path is the string to process. // dot is the extension separator. // sep is the path separator (0 means to ignore). // Returns an allocated string identical to the original but // with the extension removed. It must be freed when you're // finished with it. // If you pass in NULL or the new string can't be allocated, // it returns NULL. 
char* remove_ext(const char* path, char dot, char sep, size_t* allocated_size) { char *retstr, *lastdot, *lastsep; // Error checks and allocate string. if (path == NULL) return NULL; *allocated_size = strlen(path) + 1; retstr = (char*) ponyint_pool_alloc_size(*allocated_size); // Make a copy and find the relevant characters. strcpy(retstr, path); lastdot = strrchr(retstr, dot); lastsep = (sep == 0) ? NULL : strrchr(retstr, sep); // If it has an extension separator. if (lastdot != NULL) { // and it's before the extension separator. if (lastsep != NULL) { if (lastsep < lastdot) { // then remove it. *lastdot = '\0'; } } else { // Has extension separator with no path separator. *lastdot = '\0'; } } // Return the modified string. return retstr; } bool get_compiler_exe_path(char* output_path, const char* argv0) { bool success = false; success = (argv0 == NULL) ? success : success; // hush compiler warning #ifdef PLATFORM_IS_WINDOWS // Specified size *includes* nul terminator GetModuleFileName(NULL, output_path, FILENAME_MAX); success = (GetLastError() == ERROR_SUCCESS); #elif defined PLATFORM_IS_LINUX // Specified size *excludes* nul terminator ssize_t r = readlink("/proc/self/exe", output_path, FILENAME_MAX - 1); success = (r >= 0); if(success) output_path[r] = '\0'; #elif defined PLATFORM_IS_OPENBSD if (argv0 != NULL && (*argv0 == '/' || *argv0 == '.')) { if (pony_realpath(argv0, output_path) != NULL) { return true; } else { return false; } } else { char *env_path = getenv("PATH"); char *token, *string, *tofree; char try_path[FILENAME_MAX]; struct stat sb; if (env_path == NULL) { return false; } tofree = string = strdup(env_path); while ((token = strsep(&string, ":")) != NULL) { snprintf(try_path, sizeof(try_path), "%s/%s", token, argv0); if (access(try_path, X_OK) == 0 && stat(try_path, &sb) == 0 && (sb.st_mode & S_IFREG) == S_IFREG) { if (pony_realpath(try_path, output_path) != NULL) { success = true; break; } } } free(tofree); } #elif defined PLATFORM_IS_BSD int mib[4]; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PATHNAME; mib[3] = -1; size_t len = FILENAME_MAX; int r = sysctl(mib, 4, output_path, &len, NULL, 0); success = (r == 0); #elif defined PLATFORM_IS_MACOSX char exec_path[FILENAME_MAX]; uint32_t size = sizeof(exec_path); int r = _NSGetExecutablePath(exec_path, &size); success = (r == 0); if(success) { pony_realpath(exec_path, output_path); } #else # error Unsupported platform for exec_path() #endif return success; } bool get_compiler_exe_directory(char* output_path, const char* argv0) { bool can_get_compiler_path = get_compiler_exe_path(output_path, argv0); if (can_get_compiler_path) { char *p = strrchr(output_path, PATH_SLASH); if(p == NULL) { return false; } p++; *p = '\0'; return true; } else { return false; } }
1
13,819
Can you revert the changes to this file?
ponylang-ponyc
c
@@ -2139,7 +2139,7 @@ public class DBService {
         Role templateRole = updateTemplateRole(role, domainName, roleName, templateParams);
         firstEntry = auditLogSeparator(auditDetails, firstEntry);
         auditDetails.append(" \"add-role\": ");
-        if (!processRole(con, originalRole, domainName, roleName, templateRole,
+        if (!processRole(con, originalRole, domainName, ZMSUtils.removeDomainPrefix(templateRole.getName(), domainName, ROLE_PREFIX), templateRole,
                 admin, auditRef, true, auditDetails)) {
             return false;
         }
1
/* * Copyright 2016 Yahoo Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.yahoo.athenz.zms; import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.yahoo.athenz.auth.Principal; import com.yahoo.athenz.common.server.log.AuditLogMsgBuilder; import com.yahoo.athenz.common.server.log.AuditLogger; import com.yahoo.athenz.common.server.util.StringUtils; import com.yahoo.athenz.zms.store.AthenzDomain; import com.yahoo.athenz.zms.store.ObjectStore; import com.yahoo.athenz.zms.store.ObjectStoreConnection; import com.yahoo.athenz.zms.utils.ZMSUtils; import com.yahoo.rdl.JSON; import com.yahoo.rdl.Timestamp; import com.yahoo.rdl.UUID; public class DBService { ObjectStore store; final String userDomain; AuditLogger auditLogger; Cache<String, DataCache> cacheStore; QuotaChecker quotaCheck; int retrySleepTime; int defaultRetryCount; int defaultOpTimeout; private static final Logger LOG = LoggerFactory.getLogger(DBService.class); private static final String ROLE_PREFIX = "role."; private static final String POLICY_PREFIX = "policy."; private static final String TEMPLATE_DOMAIN_NAME = "_domain_"; public DBService(ObjectStore store, AuditLogger auditLogger, String userDomain) { this.store = store; this.userDomain = userDomain; this.auditLogger = auditLogger; cacheStore = CacheBuilder.newBuilder().concurrencyLevel(25).build(); // default timeout in seconds for object store commands defaultOpTimeout = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_STORE_OP_TIMEOUT, "60")); if (defaultOpTimeout < 0) { defaultOpTimeout = 60; } if (this.store != null) { this.store.setOperationTimeout(defaultOpTimeout); } // retrieve the concurrent update retry count. 
If we're given an invalid negative // value for count, we'll default back to our default configured value of 120 retries // which would result up to 30 seconds sleeping 250ms each time defaultRetryCount = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_COUNT, "120")); if (defaultRetryCount < 0) { defaultRetryCount = 120; } retrySleepTime = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_SLEEP_TIME, "250")); if (retrySleepTime < 0) { retrySleepTime = 250; } // create our quota checker class quotaCheck = new QuotaChecker(); } static class DataCache { AthenzDomain athenzDomain; long modTime; DataCache(AthenzDomain athenzDomain, long modTime) { this.athenzDomain = athenzDomain; this.modTime = modTime; } AthenzDomain getAthenzDomain() { return athenzDomain; } long getModTime() { return modTime; } } AthenzDomain getAthenzDomainFromCache(String domainName, boolean masterCopy) { // if we have a match for a given domain name then we're going // to check if the last modified domain timestamp matches to what's // in the db: So if there is no match, then we'll take the hit // of extra db read, however, in most cases the domain data is not // changed that often so we'll satisfy the request with just // verifying the last modification time as oppose to reading the // full domain data from db DataCache data = cacheStore.getIfPresent(domainName); if (data == null) { return null; } long modTime = 0; try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) { // we expect this response to come back immediately from // object store so we're going to use a smaller timeout // so we should know right away to use our cache con.setOperationTimeout(10); modTime = con.getDomainModTimestamp(domainName); } catch (ResourceException ex) { // if the exception is due to timeout or we were not able // to get a connection to the object store then we're // going to use our cache as is instead of rejecting // the operation if (ex.getCode() == ResourceException.SERVICE_UNAVAILABLE) { return data.getAthenzDomain(); } } // if our cache data is same or newer than db then return // data from the cache (it could be newer if we just updated // the cache based on write db but during read, the server // hasn't replicated the data yet) if (data.getModTime() >= modTime) { return data.getAthenzDomain(); } cacheStore.invalidate(domainName); return null; } String getPrincipalName(ResourceContext ctx) { if (ctx == null) { return null; } Principal principal = ((RsrcCtxWrapper) ctx).principal(); if (principal == null) { return null; } return principal.getFullName(); } void saveChanges(ObjectStoreConnection con, String domainName) { // we're first going to commit our changes which will // also set the connection in auto-commit mode. 
we are // going to change the domain timestamp in auto-commit // mode so that we don't have a contention con.commitChanges(); con.updateDomainModTimestamp(domainName); cacheStore.invalidate(domainName); } void auditLogRequest(ResourceContext ctx, String domainName, String auditRef, String caller, String operation, String entityName, String auditDetails) { AuditLogMsgBuilder msgBldr = ZMSUtils.getAuditLogMsgBuilder(ctx, auditLogger, domainName, auditRef, caller, operation); msgBldr.when(Timestamp.fromCurrentTime().toString()).whatEntity(entityName); if (auditDetails != null) { msgBldr.whatDetails(auditDetails); } auditLogger.log(msgBldr); } Domain makeDomain(ResourceContext ctx, String domainName, String description, String org, Boolean auditEnabled, List<String> adminUsers, String account, int productId, String applicationId, List<String> solutionTemplates, String auditRef) { final String caller = "makedomain"; Domain domain = new Domain() .setName(domainName) .setAuditEnabled(auditEnabled) .setDescription(description) .setOrg(org) .setId(UUID.fromCurrentTime()) .setAccount(account) .setYpmId(productId) .setModified(Timestamp.fromCurrentTime()) .setApplicationId(applicationId); // get our connection object int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // before adding this domain we need to verify our // quota check for sub-domains quotaCheck.checkSubdomainQuota(con, domainName, caller); boolean objectsInserted = con.insertDomain(domain); if (!objectsInserted) { con.rollbackChanges(); throw ZMSUtils.requestError("makeDomain: Cannot create domain: " + domainName + " - already exists", caller); } StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"domain\": "); auditLogDomain(auditDetails, domain); // first create and process the admin role Role adminRole = ZMSUtils.makeAdminRole(domainName, adminUsers); auditDetails.append(", \"role\": "); if (!processRole(con, null, domainName, ZMSConsts.ADMIN_ROLE_NAME, adminRole, getPrincipalName(ctx), auditRef, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("makeDomain: Cannot process role: '" + adminRole.getName(), caller); } // now create and process the admin policy Policy adminPolicy = ZMSUtils.makeAdminPolicy(domainName, adminRole); auditDetails.append(", \"policy\": "); if (!processPolicy(con, null, domainName, ZMSConsts.ADMIN_POLICY_NAME, adminPolicy, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("makeDomain: Cannot process policy: '" + adminPolicy.getName(), caller); } // go through our list of templates and add the specified // roles and polices to our domain if (solutionTemplates != null) { for (String templateName : solutionTemplates) { auditDetails.append(", \"template\": "); if (!addSolutionTemplate(con, domainName, templateName, getPrincipalName(ctx), null, auditRef, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("makeDomain: Cannot apply templates: '" + domain, caller); } } } auditDetails.append("}"); // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log entry auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_POST, domainName, auditDetails.toString()); return domain; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); return null; } boolean processPolicy(ObjectStoreConnection con, Policy 
originalPolicy, String domainName, String policyName, Policy policy, boolean ignoreDeletes, StringBuilder auditDetails) { // check to see if we need to insert the policy or update it boolean requestSuccess; if (originalPolicy == null) { requestSuccess = con.insertPolicy(domainName, policy); } else { requestSuccess = con.updatePolicy(domainName, policy); } // if we didn't update any policies then we need to return failure if (!requestSuccess) { return false; } // open our audit record auditDetails.append("{\"name\": \"").append(policyName).append('\"'); // now we need process our policy assertions depending this is // a new insert operation or an update List<Assertion> newAssertions = policy.getAssertions(); if (originalPolicy == null) { // we're just going to process our new assertions if (newAssertions != null) { for (Assertion assertion : newAssertions) { if (!con.insertAssertion(domainName, policyName, assertion)) { return false; } } auditLogAssertions(auditDetails, "added-assertions", newAssertions); } } else { // first we need to retrieve the current set of assertions List<Assertion> curAssertions = originalPolicy.getAssertions(); if (curAssertions == null) { curAssertions = new ArrayList<>(); } List<Assertion> addAssertions = new ArrayList<>(); List<Assertion> delAssertions = new ArrayList<>(); policyAssertionChanges(newAssertions, curAssertions, addAssertions, delAssertions); if (!ignoreDeletes) { for (Assertion assertion : delAssertions) { if (!con.deleteAssertion(domainName, policyName, assertion.getId())) { return false; } } auditLogAssertions(auditDetails, "deleted-assertions", delAssertions); } for (Assertion assertion : addAssertions) { if (!con.insertAssertion(domainName, policyName, assertion)) { return false; } } auditLogAssertions(auditDetails, "added-assertions", addAssertions); } auditDetails.append('}'); return true; } boolean removeMatchedAssertion(Assertion assertion, List<Assertion> assertions, List<Assertion> matchedAssertions) { AssertionEffect effect = AssertionEffect.ALLOW; if (assertion.getEffect() != null) { effect = assertion.getEffect(); } Iterator<Assertion> itr = assertions.iterator(); while (itr.hasNext()) { Assertion checkAssertion = itr.next(); if (!assertion.getAction().equals(checkAssertion.getAction())) { continue; } if (!assertion.getResource().equals(checkAssertion.getResource())) { continue; } if (!assertion.getRole().equals(checkAssertion.getRole())) { continue; } AssertionEffect checkEffect = AssertionEffect.ALLOW; if (checkAssertion.getEffect() != null) { checkEffect = checkAssertion.getEffect(); } if (effect != checkEffect) { continue; } itr.remove(); matchedAssertions.add(checkAssertion); return true; } return false; } void policyAssertionChanges(List<Assertion> newAssertions, List<Assertion> curAssertions, List<Assertion> addAssertions, List<Assertion> delAssertions) { // let's iterate through the new list and the ones that are // not in the current list should be added to the add list List<Assertion> matchedAssertions = new ArrayList<>(); if (newAssertions != null) { for (Assertion assertion : newAssertions) { if (!removeMatchedAssertion(assertion, curAssertions, matchedAssertions)) { addAssertions.add(assertion); } } } // now our current list has been updated as well and // all the assertions that were present moved to the // matched assertion list so whatever left in the // current list must be deleted delAssertions.addAll(curAssertions); // now let's go back and re-add the matched assertions // back to our list so we can get the right 
audit data curAssertions.addAll(matchedAssertions); } boolean processRole(ObjectStoreConnection con, Role originalRole, String domainName, String roleName, Role role, String admin, String auditRef, boolean ignoreDeletes, StringBuilder auditDetails) { // check to see if we need to insert the role or update it boolean requestSuccess; if (originalRole == null) { requestSuccess = con.insertRole(domainName, role); } else { requestSuccess = con.updateRole(domainName, role); } // if we didn't update any roles then we need to return failure if (!requestSuccess) { return false; } // open our audit record and log our trust field if one is available auditDetails.append("{\"name\": \"").append(roleName) .append("\", \"trust\": \"").append(role.getTrust()).append('\"'); // now we need process our role members depending this is // a new insert operation or an update List<RoleMember> roleMembers = role.getRoleMembers(); // support older clients which might send members field // at this point, we expect either roleMembers or members, // and we can't have both List<String> members = role.getMembers(); if (members != null && !members.isEmpty()) { roleMembers = ZMSUtils.convertMembersToRoleMembers(members); } if (originalRole == null) { // we are just going to process all members as new inserts if (roleMembers != null) { for (RoleMember member : roleMembers) { if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) { return false; } } auditLogRoleMembers(auditDetails, "added-members", roleMembers); } } else { processUpdateRoleMembers(con, originalRole, roleMembers, ignoreDeletes, domainName, roleName, admin, auditRef, auditDetails); } auditDetails.append('}'); return true; } private boolean processUpdateRoleMembers(ObjectStoreConnection con, Role originalRole, List<RoleMember> roleMembers, boolean ignoreDeletes, String domainName, String roleName, String admin, String auditRef, StringBuilder auditDetails) { // first we need to retrieve the current set of members List<RoleMember> originalMembers = originalRole.getRoleMembers(); List<RoleMember> curMembers = (null == originalMembers) ? new ArrayList<>() : new ArrayList<>(originalMembers); List<RoleMember> delMembers = new ArrayList<>(curMembers); ArrayList<RoleMember> newMembers = (null == roleMembers) ? new ArrayList<>() : new ArrayList<>(roleMembers); // remove current members from new members ZMSUtils.removeMembers(newMembers, curMembers); // remove new members from current members // which leaves the deleted members. 
ZMSUtils.removeMembers(delMembers, roleMembers); if (!ignoreDeletes) { for (RoleMember member : delMembers) { if (!con.deleteRoleMember(domainName, roleName, member.getMemberName(), admin, auditRef)) { return false; } } auditLogRoleMembers(auditDetails, "deleted-members", delMembers); } for (RoleMember member : newMembers) { if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) { return false; } } auditLogRoleMembers(auditDetails, "added-members", newMembers); return true; } boolean processServiceIdentity(ObjectStoreConnection con, ServiceIdentity originalService, String domainName, String serviceName, ServiceIdentity service, StringBuilder auditDetails) { boolean requestSuccess; if (originalService == null) { requestSuccess = con.insertServiceIdentity(domainName, service); } else { requestSuccess = con.updateServiceIdentity(domainName, service); } // if we didn't update any services then we need to return failure if (!requestSuccess) { return false; } // open our audit record and log our service details auditDetails.append("{\"name\": \"").append(serviceName).append('\"') .append(", \"executable\": \"").append(service.getExecutable()).append('\"') .append(", \"user\": \"").append(service.getUser()).append('\"') .append(", \"group\": \"").append(service.getGroup()).append('\"') .append(", \"providerEndpoint\": \"").append(service.getProviderEndpoint()).append('\"') .append(", \"description\"L \"").append(service.getDescription()).append('\"'); // now we need process our public keys depending this is // a new insert operation or an update List<PublicKeyEntry> publicKeys = service.getPublicKeys(); if (originalService == null) { // we are just going to process all public keys as new inserts if (publicKeys != null) { for (PublicKeyEntry publicKey : publicKeys) { if (!con.insertPublicKeyEntry(domainName, serviceName, publicKey)) { return false; } } auditLogPublicKeyEntries(auditDetails, "added-publickeys", publicKeys); } } else { // first we need to retrieve the current set of public keys List<PublicKeyEntry> curPublicKeys = originalService.getPublicKeys(); Map<String, PublicKeyEntry> curPublicKeysMap = new HashMap<>(); if (curPublicKeys != null) { for (PublicKeyEntry publicKey : curPublicKeys) { curPublicKeysMap.put(publicKey.getId(), publicKey); } } Map<String, PublicKeyEntry> publicKeysMap = new HashMap<>(); if (publicKeys != null) { for (PublicKeyEntry publicKey : publicKeys) { publicKeysMap.put(publicKey.getId(), publicKey); } } Set<String> curPublicKeysSet = new HashSet<>(curPublicKeysMap.keySet()); Set<String> delPublicKeysSet = new HashSet<>(curPublicKeysSet); Set<String> newPublicKeysSet = new HashSet<>(publicKeysMap.keySet()); newPublicKeysSet.removeAll(curPublicKeysSet); delPublicKeysSet.removeAll(new HashSet<>(publicKeysMap.keySet())); for (String publicKey : delPublicKeysSet) { if (!con.deletePublicKeyEntry(domainName, serviceName, publicKey)) { return false; } } auditLogPublicKeyEntries(auditDetails, "deleted-publickeys", delPublicKeysSet); for (String publicKey : newPublicKeysSet) { if (!con.insertPublicKeyEntry(domainName, serviceName, publicKeysMap.get(publicKey))) { return false; } } auditLogPublicKeyEntries(auditDetails, "added-publickeys", newPublicKeysSet, publicKeysMap); } // now we need to process the hosts defined for this service Set<String> curHosts; if (originalService != null && originalService.getHosts() != null) { curHosts = new HashSet<>(originalService.getHosts()); } else { curHosts = new HashSet<>(); } Set<String> newHosts; if 
(service.getHosts() != null) { newHosts = new HashSet<>(service.getHosts()); } else { newHosts = new HashSet<>(); } Set<String> delHosts = new HashSet<>(curHosts); delHosts.removeAll(newHosts); newHosts.removeAll(curHosts); for (String host : delHosts) { if (!con.deleteServiceHost(domainName, serviceName, host)) { return false; } } auditLogStrings(auditDetails, "deleted-hosts", delHosts); for (String host : newHosts) { if (!con.insertServiceHost(domainName, serviceName, host)) { return false; } } auditLogStrings(auditDetails, "added-hosts", newHosts); auditDetails.append('}'); return true; } boolean shouldRetryOperation(ResourceException ex, int retryCount) { // before doing anything else let's check to see if // we still have the option to retry the operation if (retryCount <= 1) { return false; } // if we got a conflict result it means we either had // no connection or deadlock was detected and as such // the changes were aborted boolean retry = false; switch (ex.getCode()) { case ResourceException.CONFLICT: retry = true; break; case ResourceException.GONE: // this error indicates that the server is reporting is in // read-only mode which indicates a fail-over has taken place // and we need to clear all connections and start new ones // this could only happen with write operations against the // read-write object store store.clearConnections(); retry = true; break; } // if we're asked to retry then we're going to // wait for a short period of time to allow the other // connection to finish its work if (retry) { if (LOG.isDebugEnabled()) { LOG.debug(": possible deadlock, retries available: " + retryCount); } ZMSUtils.threadSleep(retrySleepTime); } // return our response return retry; } void executePutPolicy(ResourceContext ctx, String domainName, String policyName, Policy policy, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // check that quota is not exceeded quotaCheck.checkPolicyQuota(con, domainName, policy, caller); // retrieve our original policy Policy originalPolicy = getPolicy(con, domainName, policyName); // now process the request StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); if (!processPolicy(con, originalPolicy, domainName, policyName, policy, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, policyName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executePutRole(ResourceContext ctx, String domainName, String roleName, Role role, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // check that quota is not exceeded quotaCheck.checkRoleQuota(con, domainName, role, caller); // retrieve our original role Role originalRole = getRole(con, domainName, roleName, false, false); // now process the request StringBuilder auditDetails = new 
StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); if (!processRole(con, originalRole, domainName, roleName, role, getPrincipalName(ctx), auditRef, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put role: " + role.getName(), caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executePutServiceIdentity(ResourceContext ctx, String domainName, String serviceName, ServiceIdentity service, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // check that quota is not exceeded quotaCheck.checkServiceIdentityQuota(con, domainName, service, caller); // retrieve our original service identity object ServiceIdentity originalService = getServiceIdentity(con, domainName, serviceName); // now process the request StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); if (!processServiceIdentity(con, originalService, domainName, serviceName, service, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put service: " + service.getName(), caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, serviceName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executePutPublicKeyEntry(ResourceContext ctx, String domainName, String serviceName, PublicKeyEntry keyEntry, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // check to see if this key already exists or not PublicKeyEntry originalKeyEntry = con.getPublicKeyEntry(domainName, serviceName, keyEntry.getId(), false); // now we need verify our quota check if we know that // that we'll be adding another public key if (originalKeyEntry == null) { quotaCheck.checkServiceIdentityPublicKeyQuota(con, domainName, serviceName, caller); } // now process the request boolean requestSuccess; StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); if (originalKeyEntry == null) { requestSuccess = con.insertPublicKeyEntry(domainName, serviceName, keyEntry); auditDetails.append("{\"added-publicKeys\": ["); } else { requestSuccess = con.updatePublicKeyEntry(domainName, serviceName, keyEntry); auditDetails.append("{\"updated-publicKeys\": ["); } if (!requestSuccess) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put public key: " + keyEntry.getId() + " in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller); } // update our service and domain time-stamp and save changes con.updateServiceIdentityModTimestamp(domainName, serviceName); saveChanges(con, domainName); // audit log the request auditLogPublicKeyEntry(auditDetails, keyEntry, 
true); auditDetails.append("]}"); if (null != ctx) { auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, serviceName, auditDetails.toString()); } return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeletePublicKeyEntry(ResourceContext ctx, String domainName, String serviceName, String keyId, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // now process the request if (!con.deletePublicKeyEntry(domainName, serviceName, keyId)) { con.rollbackChanges(); throw ZMSUtils.notFoundError("unable to delete public key: " + keyId + " in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller); } // update our service and domain time-stamp and save changes con.updateServiceIdentityModTimestamp(domainName, serviceName); saveChanges(con, domainName); // audit log the request StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"deleted-publicKeys\": [{\"id\": \"").append(keyId).append("\"}]}"); if (null != ctx) { auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, serviceName, auditDetails.toString()); } return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } boolean isTrustRole(Role role) { if (role == null) { return false; } return role.getTrust() != null && !role.getTrust().isEmpty(); } void executePutMembership(ResourceContext ctx, String domainName, String roleName, RoleMember roleMember, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // before inserting a member we need to verify that // this is a group role and not a delegated one. if (isTrustRole(con.getRole(domainName, roleName))) { con.rollbackChanges(); throw ZMSUtils.requestError(caller + ": " + roleName + "is a delegated role", caller); } // now we need verify our quota check quotaCheck.checkRoleMembershipQuota(con, domainName, roleName, caller); // process our insert role member support. since this is a "single" // operation, we are not using any transactions. 
if (!con.insertRoleMember(domainName, roleName, roleMember, getPrincipalName(ctx), auditRef)) { con.rollbackChanges(); throw ZMSUtils.requestError(caller + ": unable to insert role member: " + roleMember.getMemberName() + " to role: " + roleName, caller); } // update our role and domain time-stamps, and invalidate local cache entry con.updateRoleModTimestamp(domainName, roleName); con.updateDomainModTimestamp(domainName); cacheStore.invalidate(domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName, "{\"member\": \"" + roleMember.getMemberName() + "\"}"); return; } catch (ResourceException ex) { // otherwise check if we need to retry or return failure if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executePutEntity(ResourceContext ctx, String domainName, String entityName, Entity entity, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // check that quota is not exceeded quotaCheck.checkEntityQuota(con, domainName, entity, caller); // check to see if this key already exists or not Entity originalEntity = con.getEntity(domainName, entityName); // now process the request boolean requestSuccess; if (originalEntity == null) { requestSuccess = con.insertEntity(domainName, entity); } else { requestSuccess = con.updateEntity(domainName, entity); } if (!requestSuccess) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put entity: " + entity.getName(), caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, entity.getName(), JSON.string(entity.getValue())); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteMembership(ResourceContext ctx, String domainName, String roleName, String normalizedMember, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // if this is the admin role then we need to make sure // the admin is not himself who happens to be the last // member in the role if (ZMSConsts.ADMIN_ROLE_NAME.equals(roleName)) { List<RoleMember> members = con.listRoleMembers(domainName, roleName); if (members.size() == 1 && members.get(0).getMemberName().equals(normalizedMember)) { throw ZMSUtils.forbiddenError(caller + ": Cannot delete last member of 'admin' role", caller); } } // process our delete role member operation if (!con.deleteRoleMember(domainName, roleName, normalizedMember, getPrincipalName(ctx), auditRef)) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": unable to delete role member: " + normalizedMember + " from role: " + roleName, caller); } // update our role and domain time-stamps, and invalidate local cache entry con.updateRoleModTimestamp(domainName, roleName); con.updateDomainModTimestamp(domainName); cacheStore.invalidate(domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, roleName, "{\"member\": \"" + normalizedMember + 
"\"}"); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteServiceIdentity(ResourceContext ctx, String domainName, String serviceName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // process our delete service request if (!con.deleteServiceIdentity(domainName, serviceName)) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": unable to delete service: " + serviceName, caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, serviceName, null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteEntity(ResourceContext ctx, String domainName, String entityName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // process our delete role request if (!con.deleteEntity(domainName, entityName)) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": unable to delete entity: " + entityName, caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, entityName, null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteRole(ResourceContext ctx, String domainName, String roleName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // process our delete role request if (!con.deleteRole(domainName, roleName)) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": unable to delete role: " + roleName, caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, roleName, null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeletePolicy(ResourceContext ctx, String domainName, String policyName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // process our delete policy request if (!con.deletePolicy(domainName, policyName)) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": unable to delete policy: " + policyName, caller); } // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, policyName, null); return; 
} catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } /** * If the domain has audit enabled, and user did not provide the auditRef, * an exception will be thrown. This is the first check before any write * operation is carried out so we don't really have anything to roll-back **/ Domain checkDomainAuditEnabled(ObjectStoreConnection con, String domainName, String auditRef, String caller) { Domain domain = con.getDomain(domainName); if (domain == null) { con.rollbackChanges(); throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller); } if (domain.getAuditEnabled() && (auditRef == null || auditRef.length() == 0)) { con.rollbackChanges(); throw ZMSUtils.requestError(caller + ": Audit reference required for domain: " + domainName, caller); } return domain; } Domain executeDeleteDomain(ResourceContext ctx, String domainName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met Domain domain = checkDomainAuditEnabled(con, domainName, auditRef, caller); // now process the request con.deleteDomain(domainName); con.commitChanges(); cacheStore.invalidate(domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, domainName, null); return domain; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); return null; } List<String> listPrincipals(String domainName, boolean domainOnly) { try (ObjectStoreConnection con = store.getConnection(true, false)) { List<String> principals = con.listPrincipals(domainName); // if no further filtering is necessary, return the data // right away if (!domainOnly) { return principals; } // generate our return list List<String> users = new ArrayList<>(); // if we're asked for domain only then we need to match // the domain name, if specified, and make sure the response // only includes a single period/domain separator // we need to skip an extra byte to accommodate for the // domain separator (e.g. <domainName>.<userName>) int prefixLength = 0; if (domainName != null) { prefixLength = domainName.length() + 1; } for (String principal : principals) { // make sure the principal name doesn't have multiple // components - e.g. 
user.joe.test since it represents // a service or a sub-domain and we're only interested // in actual users if (prefixLength > 0) { if (principal.substring(prefixLength).indexOf('.') == -1) { users.add(principal); } } else { // we have a single separator when the first index // and the last index are the same if (principal.indexOf('.') == principal.lastIndexOf('.')) { users.add(principal); } } } return users; } } void removePrincipalFromAllRoles(ObjectStoreConnection con, String principalName, String adminUser, String auditRef) { // extract all the roles that this principal is member of // we have to this here so that there are records of // entries in the role member audit logs and the domain // entries are properly invalidated List<PrincipalRole> roles; try { roles = con.listPrincipalRoles(principalName); } catch (ResourceException ex) { // if there is no such principal then we have nothing to do if (ex.getCode() == ResourceException.NOT_FOUND) { return; } else { throw ex; } } for (PrincipalRole role : roles) { final String domainName = role.getDomainName(); final String roleName = role.getRoleName(); // process our delete role member operation if (LOG.isDebugEnabled()) { LOG.debug("removePrincipalFromAllRoles: removing member {} from {}:role.{}", principalName, domainName, roleName); } // we are going to ignore all errors here rather than // rejecting the full operation. our delete user will // eventually remove all these principals try { con.deleteRoleMember(domainName, roleName, principalName, adminUser, auditRef); } catch (ResourceException ex) { LOG.error("removePrincipalFromAllRoles: unable to remove {} from {}:role.{} - error {}", principalName, domainName, roleName, ex.getMessage()); } // update our role and domain time-stamps, and invalidate local cache entry con.updateRoleModTimestamp(domainName, roleName); con.updateDomainModTimestamp(domainName); } } void removePrincipalDomains(ObjectStoreConnection con, String principalName) { // first we're going to retrieve the list domains for // the given user final String domainPrefix = principalName + "."; List<String> subDomains = con.listDomains(domainPrefix, 0); // first we're going to delete the user domain if // one exists and then all the sub-domains. We're not // going to fail the operation for these steps - only // if the actual user is not deleted con.deleteDomain(principalName); cacheStore.invalidate(principalName); for (String subDomain : subDomains) { con.deleteDomain(subDomain); cacheStore.invalidate(subDomain); } } void executeDeleteUser(ResourceContext ctx, String userName, String domainName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // remove all principal domains removePrincipalDomains(con, domainName); // extract all principals that this user has - this would // include the user self plus all services this user // has created in the personal domain + sub-domains List<String> userSvcPrincipals = con.listPrincipals(domainName); // remove this user from all roles manually so that we // can have an audit log record for each role final String adminPrincipal = getPrincipalName(ctx); removePrincipalFromAllRoles(con, userName, adminPrincipal, auditRef); for (String userSvcPrincipal : userSvcPrincipals) { removePrincipalFromAllRoles(con, userSvcPrincipal, adminPrincipal, auditRef); } // finally delete the principal object. 
any roles that were // left behind will be cleaned up from this operation if (!con.deletePrincipal(userName, true)) { throw ZMSUtils.notFoundError(caller + ": unable to delete user: " + userName, caller); } // audit log the request auditLogRequest(ctx, userName, auditRef, caller, ZMSConsts.HTTP_DELETE, userName, null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } ServiceIdentity getServiceIdentity(String domainName, String serviceName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return getServiceIdentity(con, domainName, serviceName); } } DomainTemplateList listDomainTemplates(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { DomainTemplateList domainTemplateList = new DomainTemplateList(); domainTemplateList.setTemplateNames(con.listDomainTemplates(domainName)); return domainTemplateList; } } ServiceIdentity getServiceIdentity(ObjectStoreConnection con, String domainName, String serviceName) { ServiceIdentity service = con.getServiceIdentity(domainName, serviceName); if (service != null) { service.setPublicKeys(con.listPublicKeys(domainName, serviceName)); List<String> hosts = con.listServiceHosts(domainName, serviceName); if (hosts != null && !hosts.isEmpty()) { service.setHosts(hosts); } } return service; } PublicKeyEntry getPublicKeyFromCache(String domainName, String serviceName, String keyId) { DataCache data = cacheStore.getIfPresent(domainName); if (data == null) { return null; } AthenzDomain athenzDomain = data.getAthenzDomain(); if (athenzDomain == null) { return null; } List<ServiceIdentity> services = athenzDomain.getServices(); if (services == null) { return null; } final String fullServiceName = ZMSUtils.serviceResourceName(domainName, serviceName); for (ServiceIdentity service : services) { if (fullServiceName.equals(service.getName())) { List<PublicKeyEntry> publicKeys = service.getPublicKeys(); if (publicKeys != null) { for (PublicKeyEntry publicKey : publicKeys) { if (keyId.equals(publicKey.getId())) { return publicKey; } } } break; } } return null; } PublicKeyEntry getServicePublicKeyEntry(String domainName, String serviceName, String keyId, boolean domainStateCheck) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.getPublicKeyEntry(domainName, serviceName, keyId, domainStateCheck); } catch (ResourceException ex) { if (ex.getCode() != ResourceException.SERVICE_UNAVAILABLE) { throw ex; } } // if we got this far it means we couldn't get our public key // from our DB store either due to timeout or communication // error so we're going to see if we have the public key in // our cache and use that for our requests PublicKeyEntry keyEntry = getPublicKeyFromCache(domainName, serviceName, keyId); if (keyEntry == null) { throw new ResourceException(ResourceException.SERVICE_UNAVAILABLE, "Unable to retrieve public key from DB store"); } return keyEntry; } public ResourceAccessList getResourceAccessList(String principal, String action) { // this commands takes a quite a bit of time due to joining tables // and needs to be optimized. 
For now we'll configure it with // default timeout of 30 minutes to avoid any issues try (ObjectStoreConnection con = store.getConnection(true, false)) { con.setOperationTimeout(1800); return con.listResourceAccess(principal, action, userDomain); } } Domain getDomain(String domainName, boolean masterCopy) { try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) { return con.getDomain(domainName); } } List<String> listDomains(String prefix, long modifiedSince) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.listDomains(prefix, modifiedSince); } } DomainList lookupDomainById(String account, int productId) { DomainList domList = new DomainList(); try (ObjectStoreConnection con = store.getConnection(true, false)) { String domain = con.lookupDomainById(account, productId); if (domain != null) { List<String> list = Collections.singletonList(domain); domList.setNames(list); } } return domList; } DomainList lookupDomainByAccount(String account) { return lookupDomainById(account, 0); } DomainList lookupDomainByProductId(Integer productId) { return lookupDomainById(null, productId); } DomainList lookupDomainByRole(String roleMember, String roleName) { DomainList domList = new DomainList(); try (ObjectStoreConnection con = store.getConnection(true, false)) { List<String> domains = con.lookupDomainByRole(roleMember, roleName); if (domains != null) { domList.setNames(domains); } } return domList; } List<String> listRoles(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.listRoles(domainName); } } Membership getMembership(String domainName, String roleName, String principal) { try (ObjectStoreConnection con = store.getConnection(true, false)) { Membership membership = con.getRoleMember(domainName, roleName, principal); Timestamp expiration = membership.getExpiration(); //need to check expiration and set isMember if expired if (expiration != null && expiration.millis() < System.currentTimeMillis()) { membership.setIsMember(false); } return membership; } } Role getRole(String domainName, String roleName, Boolean auditLog, Boolean expand) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return getRole(con, domainName, roleName, auditLog, expand); } } Role getRole(ObjectStoreConnection con, String domainName, String roleName, Boolean auditLog, Boolean expand) { Role role = con.getRole(domainName, roleName); if (role != null) { if (role.getTrust() == null) { // if we have no trust field specified then we need to // retrieve our standard group role members role.setRoleMembers(con.listRoleMembers(domainName, roleName)); // still populate the members for old clients role.setMembers(ZMSUtils.convertRoleMembersToMembers( role.getRoleMembers())); if (auditLog == Boolean.TRUE) { role.setAuditLog(con.listRoleAuditLogs(domainName, roleName)); } } else if (expand == Boolean.TRUE) { // otherwise, if asked, let's expand the delegated // membership and return the list of members role.setRoleMembers(getDelegatedRoleMembers(domainName, role.getTrust(), roleName)); // still populate the members for old clients role.setMembers(ZMSUtils.convertRoleMembersToMembers(role.getRoleMembers())); } } return role; } List<RoleMember> getDelegatedRoleMembers(String domainName, String trustDomain, String roleName) { // verify that the domain and trust domain are not the same if (domainName.equals(trustDomain)) { return null; } // retrieve our trust domain AthenzDomain domain = null; try { domain = 
getAthenzDomain(trustDomain, false); } catch (ResourceException ignored) { } if (domain == null) { return null; } // we need to use a set since we might be matching // multiple assertions and we want to automatically // skip any duplicate members Map<String, RoleMember> roleMembers = new HashMap<>(); // generate our full role name String fullRoleName = ZMSUtils.roleResourceName(domainName, roleName); // iterate through all policies to see which one has the // assume_role assertion for the given role for (Policy policy : domain.getPolicies()) { List<Assertion> assertions = policy.getAssertions(); if (assertions == null) { continue; } for (Assertion assertion : assertions) { if (!ZMSUtils.assumeRoleResourceMatch(fullRoleName, assertion)) { continue; } String rolePattern = StringUtils.patternFromGlob(assertion.getRole()); for (Role role : domain.getRoles()) { // make sure we have members before trying to match the name List<RoleMember> members = role.getRoleMembers(); if (members == null || members.isEmpty()) { continue; } if (!role.getName().matches(rolePattern)) { continue; } for (RoleMember member : members) { String memberName = member.getMemberName(); if (!roleMembers.containsKey(memberName)) { roleMembers.put(memberName, member); } } } } } return new ArrayList<>(roleMembers.values()); } Policy getPolicy(String domainName, String policyName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return getPolicy(con, domainName, policyName); } } Assertion getAssertion(String domainName, String policyName, Long assertionId) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.getAssertion(domainName, policyName, assertionId); } } void executePutAssertion(ResourceContext ctx, String domainName, String policyName, Assertion assertion, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // now we need verify our quota check quotaCheck.checkPolicyAssertionQuota(con, domainName, policyName, caller); // process our insert assertion. since this is a "single" // operation, we are not using any transactions. if (!con.insertAssertion(domainName, policyName, assertion)) { throw ZMSUtils.requestError(caller + ": unable to insert assertion: " + " to policy: " + policyName, caller); } // update our policy and domain time-stamps, and invalidate local cache entry con.updatePolicyModTimestamp(domainName, policyName); con.updateDomainModTimestamp(domainName); cacheStore.invalidate(domainName); // audit log the request StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditLogAssertion(auditDetails, assertion, true); auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, policyName, auditDetails.toString()); return; } catch (ResourceException ex) { // otherwise check if we need to retry or return failure if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteAssertion(ResourceContext ctx, String domainName, String policyName, Long assertionId, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // process our delete assertion. 
since this is a "single" // operation, we are not using any transactions. if (!con.deleteAssertion(domainName, policyName, assertionId)) { throw ZMSUtils.requestError(caller + ": unable to delete assertion: " + assertionId + " from policy: " + policyName, caller); } // update our policy and domain time-stamps, and invalidate local cache entry con.updatePolicyModTimestamp(domainName, policyName); con.updateDomainModTimestamp(domainName); cacheStore.invalidate(domainName); // audit log the request final String auditDetails = "{\"policy\": \"" + policyName + "\", \"assertionId\": \"" + assertionId + "\"}"; auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, policyName, auditDetails); return; } catch (ResourceException ex) { // otherwise check if we need to retry or return failure if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } List<String> listEntities(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.listEntities(domainName); } } Entity getEntity(String domainName, String entityName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.getEntity(domainName, entityName); } } Policy getPolicy(ObjectStoreConnection con, String domainName, String policyName) { Policy policy = con.getPolicy(domainName, policyName); if (policy != null) { policy.setAssertions(con.listAssertions(domainName, policyName)); } return policy; } List<String> listPolicies(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.listPolicies(domainName, null); } } List<String> listServiceIdentities(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return con.listServiceIdentities(domainName); } } void executePutDomainMeta(ResourceContext ctx, String domainName, DomainMeta meta, String auditRef, String caller) { int retryCount = defaultRetryCount; Domain domain; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met domain = checkDomainAuditEnabled(con, domainName, auditRef, caller); // now process the request Domain updatedDomain = new Domain() .setName(domain.getName()) .setEnabled(domain.getEnabled()) .setId(domain.getId()) .setAuditEnabled(meta.getAuditEnabled()) .setDescription(meta.getDescription()) .setOrg(meta.getOrg()); // we'll only update account/product id fields if the meta // object does not contain nulls if (meta.getAccount() == null && meta.getYpmId() == null) { updatedDomain.setAccount(domain.getAccount()); updatedDomain.setYpmId(domain.getYpmId()); } else { updatedDomain.setYpmId(meta.getYpmId()); updatedDomain.setAccount(meta.getAccount()); } // if meta application ID is null, update to existing application ID if (meta.getApplicationId() == null) { updatedDomain.setApplicationId(domain.getApplicationId()); } else { updatedDomain.setApplicationId(meta.getApplicationId()); } con.updateDomain(updatedDomain); con.commitChanges(); cacheStore.invalidate(domainName); // audit log the request StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditLogDomain(auditDetails, updatedDomain); auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, domainName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void 
executePutDomainTemplate(ResourceContext ctx, String domainName, DomainTemplate domainTemplate, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // go through our list of templates and add the specified // roles and polices to our domain StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"add-templates\": "); boolean firstEntry = true; for (String templateName : domainTemplate.getTemplateNames()) { firstEntry = auditLogSeparator(auditDetails, firstEntry); if (!addSolutionTemplate(con, domainName, templateName, getPrincipalName(ctx), domainTemplate.getParams(), auditRef, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put domain templates: " + domainName, caller); } } auditDetails.append("}"); // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, domainName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteDomainTemplate(ResourceContext ctx, String domainName, String templateName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, domainName, auditRef, caller); // go through our list of templates and add the specified // roles and polices to our domain StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"templates\": "); Template template = ZMSImpl.serverSolutionTemplates.get(templateName); if (!deleteSolutionTemplate(con, domainName, templateName, template, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to delete domain template: " + domainName, caller); } auditDetails.append("}"); // update our domain time-stamp and save changes saveChanges(con, domainName); // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, domainName, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } boolean addSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName, String admin, List<TemplateParam> templateParams, String auditRef, StringBuilder auditDetails) { auditDetails.append("{\"name\": \"").append(templateName).append('\"'); // we have already verified that our template is valid but // we'll just double check to make sure it's not null Template template = ZMSImpl.serverSolutionTemplates.get(templateName); if (template == null) { auditDetails.append("}"); return true; } boolean firstEntry = true; // iterate through roles in the list. // When adding a template, if the role does not exist in our domain // then insert it otherwise only apply the changes to the member list. 
// otherwise for delete request, we just the delete role List<Role> templateRoles = template.getRoles(); if (templateRoles != null) { for (Role role : templateRoles) { String roleName = ZMSUtils.removeDomainPrefix(role.getName(), TEMPLATE_DOMAIN_NAME, ROLE_PREFIX); // retrieve our original role Role originalRole = getRole(con, domainName, roleName, false, false); // now process the request Role templateRole = updateTemplateRole(role, domainName, roleName, templateParams); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"add-role\": "); if (!processRole(con, originalRole, domainName, roleName, templateRole, admin, auditRef, true, auditDetails)) { return false; } } } // iterate through policies in the list. // When adding a template, if the policy does not exist in our domain // then insert it otherwise only apply the changes to the assertions // otherwise for delete requests, we just delete the policy List<Policy> templatePolicies = template.getPolicies(); if (templatePolicies != null) { for (Policy policy : templatePolicies) { String policyName = ZMSUtils.removeDomainPrefix(policy.getName(), TEMPLATE_DOMAIN_NAME, POLICY_PREFIX); // retrieve our original policy Policy originalPolicy = getPolicy(con, domainName, policyName); // now process the request Policy templatePolicy = updateTemplatePolicy(policy, domainName, policyName, templateParams); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"add-policy\": "); if (!processPolicy(con, originalPolicy, domainName, policyName, templatePolicy, true, auditDetails)) { return false; } } } // iterate through service identities in the list. // When adding a template, if the service identity does not exist in our domain // then insert it otherwise only apply the changes // otherwise for delete requests, we just delete the service identity List<ServiceIdentity> templateServiceIdentities = template.getServices(); if (templateServiceIdentities != null) { for (ServiceIdentity serviceIdentity : templateServiceIdentities) { String serviceIdentityName = ZMSUtils.removeDomainPrefixForService(serviceIdentity.getName(), TEMPLATE_DOMAIN_NAME); // retrieve our original service ServiceIdentity originalServiceIdentity = getServiceIdentity(con, domainName, serviceIdentityName); // now process the request ServiceIdentity templateServiceIdentity = updateTemplateServiceIdentity(serviceIdentity, domainName, serviceIdentityName, templateParams); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"add-service\": "); if (!processServiceIdentity(con, originalServiceIdentity, domainName, serviceIdentityName, templateServiceIdentity, auditDetails)) { return false; } } } // if adding a template, only add if it is not in our current list // check to see if the template is already listed for the domain List<String> currentTemplateList = con.listDomainTemplates(domainName); if (!currentTemplateList.contains(templateName)) { con.insertDomainTemplate(domainName, templateName, null); } auditDetails.append("}"); return true; } boolean deleteSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName, Template template, StringBuilder auditDetails) { auditDetails.append("{\"name\": \"").append(templateName).append('\"'); // we have already verified that our template is valid but // we'll just double check to make sure it's not null if (template == null) { auditDetails.append("}"); return true; } boolean firstEntry = true; // iterate through roles in the list. 
// When adding a template, if the role does not exist in our domain // then insert it otherwise only apply the changes to the member list. // otherwise for delete request, we just the delete role List<Role> templateRoles = template.getRoles(); if (templateRoles != null) { for (Role role : templateRoles) { String roleName = ZMSUtils.removeDomainPrefix(role.getName(), TEMPLATE_DOMAIN_NAME, ROLE_PREFIX); con.deleteRole(domainName, roleName); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"delete-role\": \"").append(roleName).append('\"'); } } // iterate through policies in the list. // When adding a template, if the policy does not exist in our domain // then insert it otherwise only apply the changes to the assertions // otherwise for delete requests, we just delete the policy List<Policy> templatePolicies = template.getPolicies(); if (templatePolicies != null) { for (Policy policy : templatePolicies) { String policyName = ZMSUtils.removeDomainPrefix(policy.getName(), TEMPLATE_DOMAIN_NAME, POLICY_PREFIX); con.deletePolicy(domainName, policyName); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"delete-policy\": \"").append(policyName).append('\"'); } } // iterate through services in the list. // When adding a template, if the service does not exist in our domain // then insert it otherwise only apply the changes // otherwise for delete requests, we just delete the service List<ServiceIdentity> templateServices = template.getServices(); if (templateServices != null) { for (ServiceIdentity serviceIdentity : templateServices) { String serviceName = ZMSUtils.removeDomainPrefixForService(serviceIdentity.getName(), TEMPLATE_DOMAIN_NAME); con.deleteServiceIdentity(domainName, serviceName); firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append(" \"delete-service\": \"").append(serviceName).append('\"'); } } // if deleting a template, delete it from the current list con.deleteDomainTemplate(domainName, templateName, null); auditDetails.append("}"); return true; } Role updateTemplateRole(Role role, String domainName, String roleName, List<TemplateParam> params) { // first process our given role name and carry out any // requested substitutions String templateRoleName = roleName; if (params != null) { for (TemplateParam param : params) { final String paramKey = "_" + param.getName() + "_"; templateRoleName = templateRoleName.replace(paramKey, param.getValue()); } } Role templateRole = new Role() .setName(ZMSUtils.roleResourceName(domainName, templateRoleName)) .setTrust(role.getTrust()); List<RoleMember> roleMembers = role.getRoleMembers(); List<RoleMember> newMembers = new ArrayList<>(); if (roleMembers != null && !roleMembers.isEmpty()) { for (RoleMember roleMember : roleMembers) { RoleMember newRoleMember = new RoleMember(); // process our role members for any requested substitutions String memberName = roleMember.getMemberName().replace(TEMPLATE_DOMAIN_NAME, domainName); if (params != null) { for (TemplateParam param : params) { final String paramKey = "_" + param.getName() + "_"; memberName = memberName.replace(paramKey, param.getValue()); } } newRoleMember.setMemberName(memberName); newRoleMember.setExpiration(roleMember.getExpiration()); newMembers.add(newRoleMember); } } templateRole.setRoleMembers(newMembers); return templateRole; } Policy updateTemplatePolicy(Policy policy, String domainName, String policyName, List<TemplateParam> params) { // first process our given role name and carry out any // 
requested substitutions String templatePolicyName = policyName; if (params != null) { for (TemplateParam param : params) { final String paramKey = "_" + param.getName() + "_"; templatePolicyName = templatePolicyName.replace(paramKey, param.getValue()); } } Policy templatePolicy = new Policy().setName(ZMSUtils.policyResourceName(domainName, templatePolicyName)); List<Assertion> assertions = policy.getAssertions(); List<Assertion> newAssertions = new ArrayList<>(); if (assertions != null && !assertions.isEmpty()) { for (Assertion assertion : assertions) { Assertion newAssertion = new Assertion(); newAssertion.setAction(assertion.getAction()); newAssertion.setEffect(assertion.getEffect()); // process our assertion resource and role for any requested substitutions String resource = assertion.getResource().replace(TEMPLATE_DOMAIN_NAME, domainName); String role = assertion.getRole().replace(TEMPLATE_DOMAIN_NAME, domainName); if (params != null) { for (TemplateParam param : params) { final String paramKey = "_" + param.getName() + "_"; resource = resource.replace(paramKey, param.getValue()); role = role.replace(paramKey, param.getValue()); } } newAssertion.setResource(resource); newAssertion.setRole(role); newAssertions.add(newAssertion); } } templatePolicy.setAssertions(newAssertions); return templatePolicy; } ServiceIdentity updateTemplateServiceIdentity(ServiceIdentity serviceIdentity, String domainName, String serviceIdentityName, List<TemplateParam> params) { String templateServiceName = serviceIdentityName; if (params != null) { for (TemplateParam param : params) { final String paramKey = "_" + param.getName() + "_"; templateServiceName = templateServiceName.replace(paramKey, param.getValue()); } } ServiceIdentity templateServiceIdentity = new ServiceIdentity().setName(ZMSUtils.serviceResourceName(domainName, templateServiceName)); templateServiceIdentity.setDescription(serviceIdentity.getDescription()); templateServiceIdentity.setExecutable(serviceIdentity.getExecutable()); templateServiceIdentity.setGroup(serviceIdentity.getGroup()); templateServiceIdentity.setUser(serviceIdentity.getUser()); templateServiceIdentity.setProviderEndpoint(serviceIdentity.getProviderEndpoint()); List<PublicKeyEntry> publicKeyEntries = serviceIdentity.getPublicKeys(); List<PublicKeyEntry> newPublicKeyEntries = new ArrayList<>(); if (publicKeyEntries != null && !publicKeyEntries.isEmpty()) { for (PublicKeyEntry publicKeyEntry : publicKeyEntries) { PublicKeyEntry newPublicKeyEntry = new PublicKeyEntry(); newPublicKeyEntry.setId(publicKeyEntry.getId()); newPublicKeyEntry.setKey(publicKeyEntry.getKey()); newPublicKeyEntries.add(newPublicKeyEntry); } } templateServiceIdentity.setPublicKeys(newPublicKeyEntries); List<String> hosts = serviceIdentity.getHosts(); if (hosts != null) { templateServiceIdentity.setHosts(new ArrayList<>(hosts)); } return templateServiceIdentity; } void setupTenantAdminPolicy(String tenantDomain, String provSvcDomain, String provSvcName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, tenantDomain, auditRef, caller); String domainAdminRole = ZMSUtils.roleResourceName(tenantDomain, ZMSConsts.ADMIN_ROLE_NAME); String serviceRoleResourceName = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain, provSvcName, tenantDomain, null) + ZMSConsts.ADMIN_ROLE_NAME; // our tenant admin role/policy name final String 
tenancyResource = "tenancy." + provSvcDomain + '.' + provSvcName; String adminName = tenancyResource + ".admin"; String tenantAdminRole = ZMSUtils.roleResourceName(tenantDomain, adminName); // tenant admin role - if it already exists then we skip it // by default it has no members. if (con.getRole(tenantDomain, adminName) == null) { con.insertRole(tenantDomain, new Role().setName(tenantAdminRole)); } // tenant admin policy - check to see if this already exists. If it does // then we don't have anything to do if (con.getPolicy(tenantDomain, adminName) == null) { Policy adminPolicy = new Policy().setName(ZMSUtils.policyResourceName(tenantDomain, adminName)); con.insertPolicy(tenantDomain, adminPolicy); // we are going to create 2 assertions - one for the domain admin role // and another for the tenant admin role Assertion assertion = new Assertion().setRole(domainAdminRole) .setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE) .setEffect(AssertionEffect.ALLOW); con.insertAssertion(tenantDomain, adminName, assertion); assertion = new Assertion().setRole(tenantAdminRole) .setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE) .setEffect(AssertionEffect.ALLOW); con.insertAssertion(tenantDomain, adminName, assertion); // the tenant admin role must have the capability to provision // new resource groups in the domain which requires update // action capability on resource tenancy.<prov_domain>.<prov_svc> String tenantResourceName = tenantDomain + ":" + tenancyResource; assertion = new Assertion().setRole(tenantAdminRole) .setResource(tenantResourceName).setAction(ZMSConsts.ACTION_UPDATE) .setEffect(AssertionEffect.ALLOW); con.insertAssertion(tenantDomain, adminName, assertion); } // update our domain time-stamp and save changes saveChanges(con, tenantDomain); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executePutTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName, String tenantDomain, String resourceGroup, List<TenantRoleAction> roles, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller); String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain, provSvcName, tenantDomain, resourceGroup); StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"put-tenant-roles\": ["); boolean firstEntry = true; for (TenantRoleAction ra : roles) { String tenantRole = ra.getRole(); String tenantAction = ra.getAction(); String trustedRole = trustedRolePrefix + tenantRole; String trustedName = trustedRole.substring((provSvcDomain + ":role.").length()); Role role = new Role().setName(trustedRole).setTrust(tenantDomain); if (LOG.isInfoEnabled()) { LOG.info(caller + ": add trusted Role to domain " + provSvcDomain + ": " + trustedRole + " -> " + role); } // retrieve our original role in case one exists Role originalRole = getRole(con, provSvcDomain, trustedName, false, false); // now process the request firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append("{\"role\": "); if (!processRole(con, originalRole, provSvcDomain, trustedName, role, getPrincipalName(ctx), auditRef, false, auditDetails)) { con.rollbackChanges(); throw 
ZMSUtils.internalServerError("unable to put role: " + trustedRole, caller); } String policyResourceName = ZMSUtils.policyResourceName(provSvcDomain, trustedName); final String resourceName = provSvcDomain + ":service." + ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup) + '*'; List<Assertion> assertions = Collections.singletonList( new Assertion().setRole(trustedRole) .setResource(resourceName) .setAction(tenantAction)); Policy policy = new Policy().setName(policyResourceName).setAssertions(assertions); if (LOG.isInfoEnabled()) { LOG.info(caller + ": add trust policy to domain " + provSvcDomain + ": " + trustedRole + " -> " + policy); } // retrieve our original policy Policy originalPolicy = getPolicy(con, provSvcDomain, trustedName); // now process the request auditDetails.append(", \"policy\": "); if (!processPolicy(con, originalPolicy, provSvcDomain, trustedName, policy, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller); } auditDetails.append('}'); } // update our domain time-stamp and save changes saveChanges(con, provSvcDomain); // audit log the request auditLogRequest(ctx, provSvcDomain, auditRef, caller, ZMSConsts.HTTP_PUT, tenantDomain, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void addAssumeRolePolicy(ObjectStoreConnection con, String rolePrefix, String trustedRolePrefix, String role, List<RoleMember> roleMembers, String tenantDomain, String admin, String auditRef, StringBuilder auditDetails, String caller) { // first create the role in the domain. We're going to create it // only if the role does not already exist String roleName = rolePrefix + role; String roleResourceName = ZMSUtils.roleResourceName(tenantDomain, roleName); // retrieve our original role in case one exists Role originalRole = getRole(con, tenantDomain, roleName, false, false); // we need to add the original role members to the new one if (originalRole != null && originalRole.getRoleMembers() != null) { roleMembers.addAll(originalRole.getRoleMembers()); } // now process the request Role roleObj = new Role().setName(roleResourceName).setRoleMembers(roleMembers); auditDetails.append("{\"role\": "); if (!processRole(con, originalRole, tenantDomain, roleName, roleObj, admin, auditRef, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put role: " + roleName, caller); } // now create the corresponding policy. We're going to create it // only if the policy does not exist otherwise we'll just // add a new assertion String policyName = "tenancy." 
+ roleName; String policyResourceName = ZMSUtils.policyResourceName(tenantDomain, policyName); String serviceRoleResourceName = trustedRolePrefix + role; Assertion assertion = new Assertion().setRole(roleResourceName) .setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE) .setEffect(AssertionEffect.ALLOW); if (LOG.isInfoEnabled()) { LOG.info("executePutProviderRoles: ---- ASSUME_ROLE policyName is " + policyName); } // retrieve our original policy Policy originalPolicy = getPolicy(con, tenantDomain, policyName); // we need to add the original policy assertions to the new one List<Assertion> newAssertions = new ArrayList<>(); if (originalPolicy != null && originalPolicy.getAssertions() != null) { newAssertions.addAll(originalPolicy.getAssertions()); } // if our new assertion is not already in the list then that will be added to if (!newAssertions.contains(assertion)) { newAssertions.add(assertion); } // now process the request Policy assumeRolePolicy = new Policy().setName(policyResourceName).setAssertions(newAssertions); auditDetails.append(", \"policy\": "); if (!processPolicy(con, originalPolicy, tenantDomain, policyName, assumeRolePolicy, false, auditDetails)) { con.rollbackChanges(); throw ZMSUtils.internalServerError("unable to put policy: " + assumeRolePolicy.getName(), caller); } auditDetails.append('}'); } void executePutProviderRoles(ResourceContext ctx, String tenantDomain, String provSvcDomain, String provSvcName, String resourceGroup, List<String> roles, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, tenantDomain, auditRef, caller); // we're going to create a separate role for each one of tenant roles returned // based on its action and set the caller as a member in each role String principalName = getPrincipalName(ctx); List<RoleMember> roleMembers = new ArrayList<>(); if (principalName != null) { RoleMember roleMember = new RoleMember(); roleMember.setMemberName(principalName); roleMembers.add(roleMember); } // now set up the roles and policies for all the provider roles returned. String rolePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain, provSvcName, resourceGroup); String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain, provSvcName, tenantDomain, resourceGroup); StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"put-provider-roles\": ["); boolean firstEntry = true; for (String role : roles) { role = role.toLowerCase(); if (LOG.isInfoEnabled()) { LOG.info("executePutProviderRoles: provision ASSUME_ROLE policy for access remote role in " + provSvcDomain + "." + provSvcName + ": " + resourceGroup + "." 
+ role); } firstEntry = auditLogSeparator(auditDetails, firstEntry); addAssumeRolePolicy(con, rolePrefix, trustedRolePrefix, role, roleMembers, tenantDomain, principalName, auditRef, auditDetails, caller); } auditDetails.append("]}"); // update our domain time-stamp and save changes saveChanges(con, tenantDomain); // audit log the request auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_PUT, provSvcDomain, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteTenancy(ResourceContext ctx, String tenantDomain, String provSvcDomain, String provSvcName, String resourceGroup, String auditRef, String caller) { // create list of policies and delete them from the tenant domain // have to get all policies that match "tenant.<provider>.*" // ex: tenancy.weather.storage.admin String rnamePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain, provSvcName, resourceGroup); final String pnamePrefix = "tenancy." + rnamePrefix; int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, tenantDomain, auditRef, caller); // first let's process and remove any policies that start with our // provider prefix List<String> pnames = con.listPolicies(tenantDomain, null); for (String pname : pnames) { if (!validResourceGroupObjectToDelete(pname, pnamePrefix)) { if (LOG.isDebugEnabled()) { LOG.debug(caller + ": --ignore policy " + pname); } continue; } if (LOG.isInfoEnabled()) { LOG.info(caller + ": --delete policy " + pname); } con.deletePolicy(tenantDomain, pname); } // now we're going to find any roles that have the provider prefix as // well but we're going to be careful about removing them. 
We'll check // and if we have no more policies referencing them then we'll remove List<String> rnames = con.listRoles(tenantDomain); for (String rname : rnames) { if (!validResourceGroupObjectToDelete(rname, rnamePrefix)) { if (LOG.isDebugEnabled()) { LOG.debug(caller + ": --ignore role " + rname); } continue; } if (!con.listPolicies(tenantDomain, rname).isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug(caller + ": --ignore role " + rname + " due to active references"); } continue; } if (LOG.isInfoEnabled()) { LOG.info(caller + ": --delete role " + rname); } con.deleteRole(tenantDomain, rname); } // update our domain time-stamp and save changes saveChanges(con, tenantDomain); // audit log the request auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE, ZMSUtils.entityResourceName(provSvcDomain, provSvcName), null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } boolean validResourceGroupObjectToDelete(String name, String prefix) { if (!name.startsWith(prefix)) { return false; } // the suffix must be the action which should only be // simple-name thus it cannot contain any more .'s // otherwise we don't want to make a mistake // and match substring resource groups - e.g: // system.engine and system.engine.test return (name.indexOf('.', prefix.length()) == -1); } void executeDeleteTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName, String tenantDomain, String resourceGroup, String auditRef, String caller) { // look for this tenants roles, ex: storage.tenant.sports.reader String rolePrefix = ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup); int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(false, true)) { // first verify that auditing requirements are met checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller); // find roles and policies matching the prefix List<String> rnames = con.listRoles(provSvcDomain); StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT); auditDetails.append("{\"tenant-roles\": ["); boolean firstEntry = true; for (String rname: rnames) { if (isTrustRoleForTenant(con, provSvcDomain, rname, rolePrefix, resourceGroup, tenantDomain)) { // good, its exactly what we are looking for con.deleteRole(provSvcDomain, rname); con.deletePolicy(provSvcDomain, rname); firstEntry = auditLogString(auditDetails, rname, firstEntry); } } auditDetails.append("]}"); // update our domain time-stamp and save changes saveChanges(con, provSvcDomain); // audit log the request auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE, provSvcDomain, auditDetails.toString()); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } boolean isTrustRoleForTenant(ObjectStoreConnection con, String provSvcDomain, String roleName, String rolePrefix, String resourceGroup, String tenantDomain) { // first make sure the role name starts with the given prefix if (!isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain)) { return false; } Role role = con.getRole(provSvcDomain, roleName); if (role == null) { return false; } // ensure it is a trust role for the tenant String trustDom = role.getTrust(); return trustDom != null && trustDom.equals(tenantDomain); } boolean isTrustRoleForTenant(String provSvcDomain, String roleName, 
String rolePrefix, String resourceGroup, String tenantDomain) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return isTrustRoleForTenant(con, provSvcDomain, roleName, rolePrefix, resourceGroup, tenantDomain); } } boolean isTenantRolePrefixMatch(String roleName, String rolePrefix, String resourceGroup, String tenantDomain) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain); } } boolean isTenantRolePrefixMatch(ObjectStoreConnection con, String roleName, String rolePrefix, String resourceGroup, String tenantDomain) { if (LOG.isDebugEnabled()) { LOG.debug("isTenantRolePrefixMatch: role-name=" + roleName + ", role-prefix=" + rolePrefix + ", reosurce-group=" + resourceGroup + ", tenant-domain=" + tenantDomain); } // first make sure the role name starts with the given prefix if (!roleName.startsWith(rolePrefix)) { return false; } // if we're dealing with a resource group then we need // to make sure we're not going to match a substring // resource group. Since we expect to see a SimpleName // action after the name, if we get another '.' then // we're dealing with a substring so the role does // match the expected format if (resourceGroup != null) { return (roleName.indexOf('.', rolePrefix.length()) == -1); } // otherwise we're going to split the remaining value // into components. If we have 2 components then we'll // check if we have a domain for the first component // if we don't then it's a resource group and as such // it can be removed otherwise, we'll leave it alone String[] comps = roleName.substring(rolePrefix.length()).split("\\."); if (comps.length == 2) { // check to see if we have a subdomain - if we do then // we're not going to include this role as we don't know // for sure if this for a resource group or not String subDomain = tenantDomain + "." 
+ comps[0]; if (LOG.isDebugEnabled()) { LOG.debug("isTenantRolePrefixMatch: verifying tenant subdomain: " + subDomain); } return con.getDomain(subDomain) == null; } else { // if we have more than 2 subcomponents then we're // definitely not dealing with resource groups return comps.length <= 2; } } AthenzDomain getAthenzDomain(String domainName, boolean masterCopy) { // first check to see if we our data is in the cache AthenzDomain athenzDomain = getAthenzDomainFromCache(domainName, masterCopy); if (athenzDomain != null) { return athenzDomain; } try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) { athenzDomain = con.getAthenzDomain(domainName); setMembersInDomain(athenzDomain); } DataCache dataCache = new DataCache(athenzDomain, athenzDomain.getDomain().getModified().millis()); cacheStore.put(domainName, dataCache); return athenzDomain; } private void setMembersInDomain(AthenzDomain athenzDomain) { List<Role> roleList = athenzDomain.getRoles(); if (roleList != null) { for (Role role: roleList) { List<RoleMember> roleMembers = role.getRoleMembers(); if (roleMembers != null) { List<String> members = role.getMembers(); if (members == null) { members = new ArrayList<>(); role.setMembers(members); } for (RoleMember roleMember: roleMembers) { members.add(roleMember.getMemberName()); } } } } } DomainModifiedList listModifiedDomains(long modifiedSince) { // since this is the operation executed by ZTS servers to // retrieve latest domain changes, we're going to use // the read-write store as oppose to read-only store to // get our up-to-date data try (ObjectStoreConnection con = store.getConnection(true, true)) { return con.listModifiedDomains(modifiedSince); } } boolean auditLogSeparator(StringBuilder auditDetails, boolean firstEntry) { if (!firstEntry) { auditDetails.append(','); } // regardless of the current state, the new state is no // longer the first entry so we return false return false; } void auditLogStrings(StringBuilder auditDetails, String label, Collection<String> values) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (String value : values) { firstEntry = auditLogString(auditDetails, value, firstEntry); } auditDetails.append(']'); } boolean auditLogString(StringBuilder auditDetails, String value, boolean firstEntry) { firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append('\"').append(value).append('\"'); return firstEntry; } void auditLogRoleMembers(StringBuilder auditDetails, String label, Collection<RoleMember> values) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (RoleMember value : values) { String entry = value.getMemberName(); if (value.getExpiration() != null) { entry = entry + ":" + value.getExpiration().toString(); } firstEntry = auditLogString(auditDetails, entry, firstEntry); } auditDetails.append(']'); } void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, List<PublicKeyEntry> values) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (PublicKeyEntry value : values) { firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry); } auditDetails.append(']'); } void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values, Map<String, PublicKeyEntry> publicKeysMap) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (String value : values) { firstEntry = auditLogPublicKeyEntry(auditDetails, 
publicKeysMap.get(value), firstEntry); } auditDetails.append(']'); } void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (String value : values) { firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry); } auditDetails.append(']'); } boolean auditLogPublicKeyEntry(StringBuilder auditDetails, PublicKeyEntry publicKey, boolean firstEntry) { firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append("{\"key\": \"").append(publicKey.getKey()) .append("\", \"id\": \"").append(publicKey.getId()).append("\"}"); return firstEntry; } boolean auditLogPublicKeyEntry(StringBuilder auditDetails, String publicKeyId, boolean firstEntry) { firstEntry = auditLogSeparator(auditDetails, firstEntry); auditDetails.append("{\"id\": \"").append(publicKeyId).append("\"}"); return firstEntry; } void auditLogAssertions(StringBuilder auditDetails, String label, Collection<Assertion> values) { auditDetails.append(", \"").append(label).append("\": ["); boolean firstEntry = true; for (Assertion value : values) { firstEntry = auditLogAssertion(auditDetails, value, firstEntry); } auditDetails.append(']'); } boolean auditLogAssertion(StringBuilder auditDetails, Assertion assertion, boolean firstEntry) { firstEntry = auditLogSeparator(auditDetails, firstEntry); String assertionEffect = "ALLOW"; if (assertion.getEffect() != null) { assertionEffect = assertion.getEffect().toString(); } auditDetails.append("{\"role\": \"").append(assertion.getRole()) .append("\", \"action\": \"").append(assertion.getAction()) .append("\", \"effect\": \"").append(assertionEffect) .append("\", \"resource\": \"").append(assertion.getResource()) .append("\"}"); return firstEntry; } void auditLogDomain(StringBuilder auditDetails, Domain domain) { auditDetails.append("{\"description\": \"").append(domain.getDescription()) .append("\", \"org\": \"").append(domain.getOrg()) .append("\", \"auditEnabled\": \"").append(domain.getAuditEnabled()) .append("\", \"enabled\": \"").append(domain.getEnabled()) .append("\"}"); } void executePutQuota(ResourceContext ctx, String domainName, Quota quota, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // process our insert quota. since this is a "single" // operation, we are not using any transactions. 
if (con.getQuota(domainName) != null) { con.updateQuota(domainName, quota); } else { con.insertQuota(domainName, quota); } auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, domainName, null); return; } catch (ResourceException ex) { // otherwise check if we need to retry or return failure if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } void executeDeleteQuota(ResourceContext ctx, String domainName, String auditRef, String caller) { int retryCount = defaultRetryCount; do { try (ObjectStoreConnection con = store.getConnection(true, true)) { // process our delete quota request - it's a single // operation so no need to make it a transaction if (!con.deleteQuota(domainName)) { throw ZMSUtils.notFoundError(caller + ": unable to delete quota: " + domainName, caller); } // audit log the request auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE, domainName, null); return; } catch (ResourceException ex) { if (!shouldRetryOperation(ex, retryCount)) { throw ex; } } retryCount -= 1; } while (retryCount > 0); } public Quota getQuota(String domainName) { try (ObjectStoreConnection con = store.getConnection(true, false)) { return quotaCheck.getDomainQuota(con, domainName); } } }
1
4,704
I don't believe the change is sufficient to correctly handle variable substitutions in the name. As part of the process command we pass the original role object that was retrieved without taking the substitution into account. The first template apply will work fine because the original role does not exist, so it's a new add operation; but if you re-apply the same template, we'll look up the wrong original name and execute another add operation instead of a modify. So we need to take the substituted name into account when we retrieve the original role as well. The same change should be applied to both the policy and service blocks.
AthenZ-athenz
java
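A minimal sketch of the lookup order the reviewer is describing, reusing the helper names (ZMSUtils.removeDomainPrefix, updateTemplateRole, getRole) visible in the surrounding DBService code; this illustrates the suggestion, not the fix that actually landed:

String roleName = ZMSUtils.removeDomainPrefix(role.getName(), TEMPLATE_DOMAIN_NAME, ROLE_PREFIX);
// build the template role first - updateTemplateRole already applies the
// _param_ substitutions to the role name
Role templateRole = updateTemplateRole(role, domainName, roleName, templateParams);
// then resolve the original role via the substituted name, so re-applying the
// template finds the previously created object and runs a modify, not a second add
final String substitutedName = ZMSUtils.removeDomainPrefix(templateRole.getName(), domainName, ROLE_PREFIX);
Role originalRole = getRole(con, domainName, substitutedName, false, false);

Per the comment, the same reordering would apply to the policy (getPolicy) and service identity (getServiceIdentity) blocks in addSolutionTemplate.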
@@ -14,5 +14,6 @@ namespace MvvmCross.Base { Task ExecuteOnMainThreadAsync(Action action, bool maskExceptions = true); Task ExecuteOnMainThreadAsync(Func<Task> action, bool maskExceptions = true); + bool IsOnMainThread { get; } } }
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Threading.Tasks; namespace MvvmCross.Base { // Note: The long term goal should be to deprecate IMvxMainThreadDispatcher // As such, even though the implementation of this interface also implements // IMvxMainThreadDispatcher, this interface should not inherit from IMvxMainThreadDispatcher public interface IMvxMainThreadAsyncDispatcher { Task ExecuteOnMainThreadAsync(Action action, bool maskExceptions = true); Task ExecuteOnMainThreadAsync(Func<Task> action, bool maskExceptions = true); } }
1
14,318
Please add this to IMvxMainThreadDispatcher as well
MvvmCross-MvvmCross
.cs
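A sketch of the parallel change the reviewer asks for - the same property added to the legacy IMvxMainThreadDispatcher interface. That interface's existing members are not shown in this row, so they are elided rather than guessed:

// Licensed to the .NET Foundation under one or more agreements.
namespace MvvmCross.Base
{
    public interface IMvxMainThreadDispatcher
    {
        // ... existing dispatcher members elided ...
        bool IsOnMainThread { get; }
    }
}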
@@ -223,7 +223,7 @@ module Bolt if @future to_expand = %w[private-key cacert token-file] & selected.keys to_expand.each do |opt| - selected[opt] = File.expand_path(selected[opt], @boltdir.path) if opt.is_a?(String) + selected[opt] = File.expand_path(selected[opt], @boltdir.path) if selected[opt].is_a?(String) end end
1
# frozen_string_literal: true require 'etc' require 'logging' require 'pathname' require 'bolt/boltdir' require 'bolt/transport/ssh' require 'bolt/transport/winrm' require 'bolt/transport/orch' require 'bolt/transport/local' require 'bolt/transport/local_windows' require 'bolt/transport/docker' require 'bolt/transport/remote' require 'bolt/util' module Bolt TRANSPORTS = { ssh: Bolt::Transport::SSH, winrm: Bolt::Transport::WinRM, pcp: Bolt::Transport::Orch, local: Bolt::Util.windows? ? Bolt::Transport::LocalWindows : Bolt::Transport::Local, docker: Bolt::Transport::Docker, remote: Bolt::Transport::Remote }.freeze class UnknownTransportError < Bolt::Error def initialize(transport, uri = nil) msg = uri.nil? ? "Unknown transport #{transport}" : "Unknown transport #{transport} found for #{uri}" super(msg, 'bolt/unknown-transport') end end class Config attr_accessor :concurrency, :format, :trace, :log, :puppetdb, :color, :save_rerun, :transport, :transports, :inventoryfile, :compile_concurrency, :boltdir, :puppetfile_config, :plugins, :plugin_hooks, :future attr_writer :modulepath TRANSPORT_OPTIONS = %i[password run-as sudo-password extensions sudo-executable private-key tty tmpdir user connect-timeout disconnect-timeout cacert token-file service-url interpreters file-protocol smb-port realm].freeze PUPPETFILE_OPTIONS = %w[proxy forge].freeze def self.default new(Bolt::Boltdir.new('.'), {}) end def self.from_boltdir(boltdir, overrides = {}) data = Bolt::Util.read_config_file(nil, [boltdir.config_file], 'config') || {} new(boltdir, data, overrides) end def self.from_file(configfile, overrides = {}) boltdir = Bolt::Boltdir.new(Pathname.new(configfile).expand_path.dirname) data = Bolt::Util.read_config_file(configfile, [], 'config') || {} new(boltdir, data, overrides) end def initialize(boltdir, config_data, overrides = {}) @logger = Logging.logger[self] @boltdir = boltdir @concurrency = 100 @compile_concurrency = Etc.nprocessors @transport = 'ssh' @format = 'human' @puppetdb = {} @color = true @save_rerun = true @puppetfile_config = {} @plugins = {} @plugin_hooks = {} # add an entry for the default console logger @log = { 'console' => {} } @transports = {} TRANSPORTS.each do |key, transport| @transports[key] = transport.default_options end update_from_file(config_data) apply_overrides(overrides) validate end def overwrite_transport_data(transport, transports) @transport = transport @transports = transports end def transport_data_get { transport: @transport, transports: @transports } end def deep_clone Bolt::Util.deep_clone(self) end def normalize_interpreters(interpreters) Bolt::Util.walk_keys(interpreters) do |key| key.chars[0] == '.' ? key : '.' + key end end def normalize_log(target) return target if target == 'console' target = target[5..-1] if target.start_with?('file:') if @future 'file:' + File.expand_path(target, @boltdir.path) else 'file:' + File.expand_path(target) end end def update_logs(logs) logs.each_pair do |k, v| log_name = normalize_log(k) @log[log_name] ||= {} log = @log[log_name] next unless v.is_a?(Hash) if v.key?('level') log[:level] = v['level'].to_s end if v.key?('append') log[:append] = v['append'] end end end def update_from_file(data) @future = data['future'] == true if data['log'].is_a?(Hash) update_logs(data['log']) end # Expand paths relative to the Boltdir. Any settings that came from the # CLI will already be absolute, so the expand will be skipped. 
if data.key?('modulepath') moduledirs = if data['modulepath'].is_a?(String) data['modulepath'].split(File::PATH_SEPARATOR) else data['modulepath'] end @modulepath = moduledirs.map do |moduledir| File.expand_path(moduledir, @boltdir.path) end end @inventoryfile = File.expand_path(data['inventoryfile'], @boltdir.path) if data.key?('inventoryfile') if data.key?('puppetfile') @puppetfile_config = data['puppetfile'].select { |k, _| PUPPETFILE_OPTIONS.include?(k) } end @hiera_config = File.expand_path(data['hiera-config'], @boltdir.path) if data.key?('hiera-config') @compile_concurrency = data['compile-concurrency'] if data.key?('compile-concurrency') @save_rerun = data['save-rerun'] if data.key?('save-rerun') @plugins = data['plugins'] if data.key?('plugins') @plugin_hooks = data['plugin_hooks'] if data.key?('plugin_hooks') %w[concurrency format puppetdb color].each do |key| send("#{key}=", data[key]) if data.key?(key) end update_transports(data) end private :update_from_file def apply_overrides(options) %i[concurrency transport format trace modulepath inventoryfile color].each do |key| send("#{key}=", options[key]) if options.key?(key) end @save_rerun = options[:'save-rerun'] if options.key?(:'save-rerun') if options[:debug] @log['console'][:level] = :debug end @compile_concurrency = options[:'compile-concurrency'] if options[:'compile-concurrency'] TRANSPORTS.each_key do |transport| transport = @transports[transport] TRANSPORT_OPTIONS.each do |key| if options[key] transport[key.to_s] = Bolt::Util.walk_keys(options[key], &:to_s) end end end if options.key?(:ssl) # this defaults to true so we need to check the presence of the key @transports[:winrm]['ssl'] = options[:ssl] end if options.key?(:'ssl-verify') # this defaults to true so we need to check the presence of the key @transports[:winrm]['ssl-verify'] = options[:'ssl-verify'] end if options.key?(:'host-key-check') # this defaults to true so we need to check the presence of the key @transports[:ssh]['host-key-check'] = options[:'host-key-check'] end end def update_from_inventory(data) update_transports(data) end def update_transports(data) TRANSPORTS.each do |key, impl| if data[key.to_s] selected = impl.filter_options(data[key.to_s]) if @future to_expand = %w[private-key cacert token-file] & selected.keys to_expand.each do |opt| selected[opt] = File.expand_path(selected[opt], @boltdir.path) if opt.is_a?(String) end end @transports[key] = Bolt::Util.deep_merge(@transports[key], selected) end if @transports[key]['interpreters'] @transports[key]['interpreters'] = normalize_interpreters(@transports[key]['interpreters']) end end @transport = data['transport'] if data.key?('transport') end def transport_conf { transport: @transport, transports: @transports } end def default_inventoryfile [@boltdir.inventory_file] end def rerunfile @boltdir.rerunfile end def hiera_config @hiera_config || @boltdir.hiera_config end def puppetfile @boltdir.puppetfile end def modulepath @modulepath || @boltdir.modulepath end def validate @log.each_pair do |name, params| if params.key?(:level) && !Bolt::Logger.valid_level?(params[:level]) raise Bolt::ValidationError, "level of log #{name} must be one of: #{Bolt::Logger.levels.join(', ')}; received #{params[:level]}" end if params.key?(:append) && params[:append] != true && params[:append] != false raise Bolt::ValidationError, "append flag of log #{name} must be a Boolean, received #{params[:append]}" end end unless @concurrency.is_a?(Integer) && @concurrency > 0 raise Bolt::ValidationError, 'Concurrency must be a 
positive integer' end unless @compile_concurrency.is_a?(Integer) && @compile_concurrency > 0 raise Bolt::ValidationError, 'Compile concurrency must be a positive integer' end compile_limit = 2 * Etc.nprocessors unless @compile_concurrency < compile_limit raise Bolt::ValidationError, "Compilation is CPU-intensive, set concurrency less than #{compile_limit}" end unless %w[human json].include? @format raise Bolt::ValidationError, "Unsupported format: '#{@format}'" end Bolt::Util.validate_file('hiera-config', @hiera_config) if @hiera_config unless @transport.nil? || Bolt::TRANSPORTS.include?(@transport.to_sym) raise UnknownTransportError, @transport end TRANSPORTS.each do |transport, impl| impl.validate(@transports[transport]) end end # Check if there is a case-insensitive match to the path def check_path_case(type, paths) return if paths.nil? matches = matching_paths(paths) if matches.any? msg = "WARNING: Bolt is case sensitive when specifying a #{type}. Did you mean:\n" matches.each { |path| msg += " #{path}\n" } @logger.warn msg end end def matching_paths(paths) [*paths].map { |p| Dir.glob([p, casefold(p)]) }.flatten.uniq.reject { |p| [*paths].include?(p) } end def casefold(path) path.chars.map do |l| l =~ /[A-Za-z]/ ? "[#{l.upcase}#{l.downcase}]" : l end.join end end end
1
13556
Is it possible for `opt` not to be a string? I couldn't tell if this was a typo or if there is actually a case where it is not a string.
puppetlabs-bolt
rb
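The guard this comment questions is in `update_transports`: `opt` iterates over keys drawn from a `%w[private-key cacert token-file]` literal intersected with `selected.keys`, so it is always a String and `opt.is_a?(String)` can never be false. A minimal sketch of the likely intent, assuming the check was meant for the option's *value* (the sample hash and path below are illustrative, not from Bolt):

```ruby
# Sketch only — not Bolt's confirmed fix. The guard presumably belongs on
# the value, which may be a non-String (e.g. a plugin-reference Hash) that
# must not be path-expanded.
selected     = { 'private-key' => 'keys/id_rsa',
                 'cacert'      => { '_plugin' => 'vault' } } # illustrative data
boltdir_path = '/project'                                    # illustrative path

to_expand = %w[private-key cacert token-file] & selected.keys
to_expand.each do |opt|
  value = selected[opt]
  # Expand only plain string paths; skip anything else untouched.
  selected[opt] = File.expand_path(value, boltdir_path) if value.is_a?(String)
end
```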
@@ -54,6 +54,7 @@ export default function UserInputSettings() { ctaLabel={ __( 'Let’s go', 'google-site-kit' ) } dismiss={ __( 'Remind me later', 'google-site-kit' ) } winImage={ global._googlesitekitLegacyData.admin.assetsRoot + personSitImage } + className="googlesitekit-user-input__notification" /> ); }
1
/** * UserInputSettings component. * * Site Kit by Google, Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { useInstanceId as useInstanceID } from '@wordpress/compose'; import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import Notification from './notification'; import { getTimeInSeconds } from '../../util'; import { STORE_NAME as CORE_USER } from '../../googlesitekit/datastore/user/constants'; import { STORE_NAME as CORE_SITE } from '../../googlesitekit/datastore/site/constants'; import personSitImage from '../../../images/person-sit.png'; const { useSelect } = Data; export default function UserInputSettings() { const instanceID = useInstanceID( UserInputSettings ); const ctaLink = useSelect( ( select ) => select( CORE_SITE ).getAdminURL( 'googlesitekit-user-input' ) ); const userInputState = useSelect( ( select ) => select( CORE_USER ).getUserInputState() ); if ( userInputState === 'completed' ) { return null; } return ( <Notification id={ `user-input-settings-notification-${ instanceID }` } title={ __( 'Customize Site Kit to match your goals', 'google-site-kit' ) } description={ __( 'Answer 5 questions and Site Kit will customize your dashboard with specific metrics and opportunities that match your site’s goals', 'google-site-kit' ) } format="large" isDismissable dismissExpires={ getTimeInSeconds( 'hour' ) * 3 } ctaLink={ ctaLink } ctaLabel={ __( 'Let’s go', 'google-site-kit' ) } dismiss={ __( 'Remind me later', 'google-site-kit' ) } winImage={ global._googlesitekitLegacyData.admin.assetsRoot + personSitImage } /> ); }
1
34061
Let's move this up next to the `id` as we usually have `className` as one of the first props.
google-site-kit-wp
js
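For illustration, a sketch of the `Notification` call after applying both the patch and the suggested ordering, with `className` placed right after `id`; it reuses the component's own identifiers, and the remaining props are unchanged and elided here:

```js
// Sketch of the suggested ordering: className sits next to id, matching the
// project's convention of listing className among the first props.
return (
	<Notification
		id={ `user-input-settings-notification-${ instanceID }` }
		className="googlesitekit-user-input__notification"
		title={ __( 'Customize Site Kit to match your goals', 'google-site-kit' ) }
		format="large"
		isDismissable
	/>
);
```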
@@ -4,14 +4,14 @@ import sys from setuptools import setup, find_packages from setuptools.command.install import install -VERSION = "3.1.0" +VERSION = "3.1.1" def readme(): readme_short = """ Quilt is a data management tool designed for data discoverability, data dependency management, and data version control using `data packages <https://blog.quiltdata.com/data-packages-for-fast-reproducible-python-analysis-c74b78015c7f>`_. - The `quilt` PyPi package allows you to build, push, and pull data packages in Quilt using Python. + The `quilt3` PyPi package allows you to build, push, and pull data packages in Quilt using Python. Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_ for more information. """
1
import os import sys from setuptools import setup, find_packages from setuptools.command.install import install VERSION = "3.1.0" def readme(): readme_short = """ Quilt is a data management tool designed for data discoverability, data dependency management, and data version control using `data packages <https://blog.quiltdata.com/data-packages-for-fast-reproducible-python-analysis-c74b78015c7f>`_. The `quilt` PyPi package allows you to build, push, and pull data packages in Quilt using Python. Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_ for more information. """ return readme_short class VerifyVersionCommand(install): """Custom command to verify that the git tag matches our version""" description = 'verify that the git tag matches our version' def run(self): tag = os.getenv('CIRCLE_TAG') if tag != VERSION: info = "Git tag: {0} does not match the version of this app: {1}".format( tag, VERSION ) sys.exit(info) setup( name="quilt3", version=VERSION, packages=find_packages(), description='Quilt: where data comes together', long_description=readme(), python_requires='>=3.6', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], author='quiltdata', author_email='[email protected]', license='LICENSE', url='https://github.com/quiltdata/quilt', keywords='', install_requires=[ 'appdirs>=1.4.0', 'aws-requests-auth>=0.4.2', 'boto3>=1.8.0', 'jsonlines==1.2.0', 'numpy>=1.14.0', # required by pandas, but missing from its dependencies. 'packaging>=16.8', 'pandas>=0.19.2', 'pyarrow>=0.14.1', # as of 7/5/19: linux/circleci bugs on 0.14.0 'requests>=2.12.4', 'ruamel.yaml<=0.15.70', 'tqdm>=4.26.0', 'urllib3<1.25,>=1.21.1', # required by requests 'xattr>=0.9.6; platform_system!="Windows"', 'humanize' ], extras_require={ 'tests': [ 'codecov', 'pytest<5.1.0', # TODO: Fix pytest.ensuretemp in conftest.py 'pytest-cov', 'responses', 'tox', 'detox', 'tox-pytest-summary', ], }, include_package_data=True, entry_points={ 'console_scripts': ['quilt3=quilt3.main:main'], }, cmdclass={ 'verify': VerifyVersionCommand, } )
1
17815
While you're in here, "build, push and install"?
quiltdata-quilt
py
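A sketch of `readme()` with the version/name patch applied plus the wording the comment floats; swapping "pull" for "install" is only the reviewer's proposal, not a confirmed change:

```python
# Hypothetical wording per the review comment.
def readme():
    readme_short = """
        Quilt is a data management tool designed for data discoverability,
        data dependency management, and data version control using data packages.

        The `quilt3` PyPi package allows you to build, push, and install
        data packages in Quilt using Python.
    """
    return readme_short
```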
@@ -35,8 +35,6 @@ namespace OpenTelemetry.Metrics public string Description { get; set; } - public string Unit { get; set; } - public string[] TagKeys { get; set; } public virtual Aggregation Aggregation { get; set; }
1
// <copyright file="MetricStreamConfiguration.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> namespace OpenTelemetry.Metrics { // TODO: can be optimized like MetricType public enum Aggregation { #pragma warning disable SA1602 // Enumeration items should be documented Default, None, Sum, LastValue, Histogram, Drop = None, #pragma warning restore SA1602 // Enumeration items should be documented } public class MetricStreamConfiguration { public string Name { get; set; } public string Description { get; set; } public string Unit { get; set; } public string[] TagKeys { get; set; } public virtual Aggregation Aggregation { get; set; } // TODO: MetricPoints caps can be configured here } }
1
21749
`Unit` never made it to the spec... so removing it.
open-telemetry-opentelemetry-dotnet
.cs
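For reference, a sketch of `MetricStreamConfiguration` after the patch, assuming nothing else in the class changes:

```csharp
// Sketch of the class after the patch: the Unit property is gone because
// the View API spec never adopted a Unit override; everything else stays.
public class MetricStreamConfiguration
{
    public string Name { get; set; }

    public string Description { get; set; }

    public string[] TagKeys { get; set; }

    public virtual Aggregation Aggregation { get; set; }

    // TODO: MetricPoints caps can be configured here
}
```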
@@ -702,6 +702,9 @@ func (nl *NATSLatency) TotalTime() time.Duration { // ServiceLatency is the JSON message sent out in response to latency tracking for // exported services. type ServiceLatency struct { + Type string `json:"type"` + ID string `json:"id"` + Time string `json:"timestamp"` Status int `json:"status"` Error string `json:"description,omitempty"` AppName string `json:"app,omitempty"`
1
// Copyright 2018-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "reflect" "sort" "strconv" "strings" "sync" "time" "github.com/nats-io/jwt" ) // For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an // account will be grouped in the default global account. const globalAccountName = DEFAULT_GLOBAL_ACCOUNT // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { Name string Nkey string Issuer string claimJWT string updated time.Time mu sync.RWMutex sqmu sync.Mutex sl *Sublist ic *client isid uint64 etmr *time.Timer ctmr *time.Timer strack map[string]sconns nrclients int32 sysclients int32 nleafs int32 nrleafs int32 clients map[*client]*client rm map[string]int32 lqws map[string]int32 usersRevoked map[string]int64 actsRevoked map[string]int64 lleafs []*client imports importMap exports exportMap js *jsAccount jsLimits *JetStreamAccountLimits limits expired bool signingKeys []string srv *Server // server this account is registered with (possibly nil) lds string // loop detection subject for leaf nodes siReply []byte // service reply prefix, will form wildcard subscription. prand *rand.Rand } // Account based limits. type limits struct { mpay int32 msubs int32 mconns int32 mleafs int32 } // Used to track remote clients and leafnodes per remote server. type sconns struct { conns int32 leafs int32 } // Import stream mapping struct type streamImport struct { acc *Account from string prefix string claim *jwt.Import invalid bool } // Import service mapping struct type serviceImport struct { acc *Account claim *jwt.Import se *serviceExport sub *subscription from string to string exsub string ts int64 rt ServiceRespType latency *serviceLatency m1 *ServiceLatency rc *client hasWC bool response bool invalid bool tracking bool } // This is used to record when we create a mapping for implicit service // imports. We use this to clean up entries that are not singletons when // we detect that interest is no longer present. The key to the map will // be the actual interest. We record the mapped subject and the account. type serviceRespEntry struct { acc *Account msub string } // ServiceRespType represents the types of service request response types. type ServiceRespType uint8 // Service response types. Defaults to a singleton. const ( Singleton ServiceRespType = iota Streamed Chunked ) // String helper. func (rt ServiceRespType) String() string { switch rt { case Singleton: return "Singleton" case Streamed: return "Streamed" case Chunked: return "Chunked" } return "Unknown ServiceResType" } // exportAuth holds configured approvals or boolean indicating an // auth token is required for import. 
type exportAuth struct { tokenReq bool approved map[string]*Account } // streamExport type streamExport struct { exportAuth } // serviceExport holds additional information for exported services. type serviceExport struct { exportAuth acc *Account respType ServiceRespType latency *serviceLatency rtmr *time.Timer respThresh time.Duration } // Used to track service latency. type serviceLatency struct { sampling int8 subject string } // exportMap tracks the exported streams and services. type exportMap struct { streams map[string]*streamExport services map[string]*serviceExport responses map[string]*serviceImport } // importMap tracks the imported streams and services. // For services we will also track the response mappings as well. type importMap struct { streams []*streamImport services map[string]*serviceImport rrMap map[string][]*serviceRespEntry } // NewAccount creates a new unlimited account with the given name. func NewAccount(name string) *Account { a := &Account{ Name: name, limits: limits{-1, -1, -1, -1}, } return a } // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. func (a *Account) shallowCopy() *Account { na := NewAccount(a.Name) na.Nkey = a.Nkey na.Issuer = a.Issuer na.imports = a.imports na.exports = a.exports na.jsLimits = a.jsLimits return na } // Called to track a remote server and connections and leafnodes it // has for this account. func (a *Account) updateRemoteServer(m *AccountNumConns) { a.mu.Lock() if a.strack == nil { a.strack = make(map[string]sconns) } // This does not depend on receiving all updates since each one is idempotent. // FIXME(dlc) - We should cleanup when these both go to zero. prev := a.strack[m.Server.ID] a.strack[m.Server.ID] = sconns{conns: int32(m.Conns), leafs: int32(m.LeafNodes)} a.nrclients += int32(m.Conns) - prev.conns a.nrleafs += int32(m.LeafNodes) - prev.leafs a.mu.Unlock() } // Removes tracking for a remote server that has shutdown. func (a *Account) removeRemoteServer(sid string) { a.mu.Lock() if a.strack != nil { prev := a.strack[sid] delete(a.strack, sid) a.nrclients -= prev.conns a.nrleafs -= prev.leafs } a.mu.Unlock() } // When querying for subject interest this is the number of // expected responses. We need to actually check that the entry // has active connections. func (a *Account) expectedRemoteResponses() (expected int32) { a.mu.RLock() for _, sc := range a.strack { if sc.conns > 0 || sc.leafs > 0 { expected++ } } a.mu.RUnlock() return } // Clears eventing and tracking for this account. func (a *Account) clearEventing() { a.mu.Lock() a.nrclients = 0 // Now clear state clearTimer(&a.etmr) clearTimer(&a.ctmr) a.clients = nil a.strack = nil a.mu.Unlock() } // GetName will return the accounts name. func (a *Account) GetName() string { if a == nil { return "n/a" } a.mu.RLock() name := a.Name a.mu.RUnlock() return name } // NumConnections returns active number of clients for this account for // all known servers. func (a *Account) NumConnections() int { a.mu.RLock() nc := len(a.clients) + int(a.nrclients) a.mu.RUnlock() return nc } // NumRemoteConnections returns the number of client or leaf connections that // are not on this server. func (a *Account) NumRemoteConnections() int { a.mu.RLock() nc := int(a.nrclients + a.nrleafs) a.mu.RUnlock() return nc } // NumLocalConnections returns active number of clients for this account // on this server. 
func (a *Account) NumLocalConnections() int { a.mu.RLock() nlc := a.numLocalConnections() a.mu.RUnlock() return nlc } // Do not account for the system accounts. func (a *Account) numLocalConnections() int { return len(a.clients) - int(a.sysclients) - int(a.nleafs) } // This is for extended local interest. // Lock should not be held. func (a *Account) numLocalAndLeafConnections() int { a.mu.RLock() nlc := len(a.clients) - int(a.sysclients) a.mu.RUnlock() return nlc } func (a *Account) numLocalLeafNodes() int { return int(a.nleafs) } // MaxTotalConnectionsReached returns if we have reached our limit for number of connections. func (a *Account) MaxTotalConnectionsReached() bool { var mtc bool a.mu.RLock() if a.mconns != jwt.NoLimit { mtc = len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns) } a.mu.RUnlock() return mtc } // MaxActiveConnections return the set limit for the account system // wide for total number of active connections. func (a *Account) MaxActiveConnections() int { a.mu.RLock() mconns := int(a.mconns) a.mu.RUnlock() return mconns } // MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes. func (a *Account) MaxTotalLeafNodesReached() bool { a.mu.RLock() mtc := a.maxTotalLeafNodesReached() a.mu.RUnlock() return mtc } func (a *Account) maxTotalLeafNodesReached() bool { if a.mleafs != jwt.NoLimit { return a.nleafs+a.nrleafs >= a.mleafs } return false } // NumLeafNodes returns the active number of local and remote // leaf node connections. func (a *Account) NumLeafNodes() int { a.mu.RLock() nln := int(a.nleafs + a.nrleafs) a.mu.RUnlock() return nln } // NumRemoteLeafNodes returns the active number of remote // leaf node connections. func (a *Account) NumRemoteLeafNodes() int { a.mu.RLock() nrn := int(a.nrleafs) a.mu.RUnlock() return nrn } // MaxActiveLeafNodes return the set limit for the account system // wide for total number of leavenode connections. // NOTE: these are tracked separately. func (a *Account) MaxActiveLeafNodes() int { a.mu.RLock() mleafs := int(a.mleafs) a.mu.RUnlock() return mleafs } // RoutedSubs returns how many subjects we would send across a route when first // connected or expressing interest. Local client subs. func (a *Account) RoutedSubs() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.rm) } // TotalSubs returns total number of Subscriptions for this account. func (a *Account) TotalSubs() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.sl.Count()) } // addClient keeps our accounting of local active clients or leafnodes updated. // Returns previous total. func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) if a.clients != nil { a.clients[c] = c } added := n != len(a.clients) if added { if c.kind == SYSTEM { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() if c != nil && c.srv != nil && added { c.srv.accConnsUpdate(a) } return n } // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. // Lock should be held on account. func (a *Account) removeLeafNode(c *client) { ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { a.lleafs[i] = a.lleafs[ll-1] if ll == 1 { a.lleafs = nil } else { a.lleafs = a.lleafs[:ll-1] } return } } } // removeClient keeps our accounting of local active clients updated. 
func (a *Account) removeClient(c *client) int { a.mu.Lock() n := len(a.clients) delete(a.clients, c) removed := n != len(a.clients) if removed { if c.kind == SYSTEM { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- a.removeLeafNode(c) } } a.mu.Unlock() if c != nil && c.srv != nil && removed { c.srv.mu.Lock() doRemove := a != c.srv.gacc c.srv.mu.Unlock() if doRemove { c.srv.accConnsUpdate(a) } } return n } func (a *Account) randomClient() *client { if a.ic != nil { return a.ic } var c *client for _, c = range a.clients { break } return c } // AddServiceExport will configure the account with the defined export. func (a *Account) AddServiceExport(subject string, accounts []*Account) error { return a.AddServiceExportWithResponse(subject, Singleton, accounts) } // AddServiceExportWithResponse will configure the account with the defined export and response type. func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.services == nil { a.exports.services = make(map[string]*serviceExport) } se := a.exports.services[subject] // Always create a service export if se == nil { se = &serviceExport{} } if respType != Singleton { se.respType = respType } if accounts != nil { // empty means auth required but will be import token. if len(accounts) == 0 { se.tokenReq = true } else { if se.approved == nil { se.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { se.approved[acc.Name] = acc } } } lrt := a.lowestServiceExportResponseTime() se.acc = a se.respThresh = DEFAULT_SERVICE_EXPORT_RESPONSE_THRESHOLD a.exports.services[subject] = se if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // TrackServiceExport will enable latency tracking of the named service. // Results will be published in this account to the given results subject. func (a *Account) TrackServiceExport(service, results string) error { return a.TrackServiceExportWithSampling(service, results, DEFAULT_SERVICE_LATENCY_SAMPLING) } // TrackServiceExportWithSampling will enable latency tracking of the named service for the given // sampling rate (1-100). Results will be published in this account to the given results subject. func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error { if a == nil { return ErrMissingAccount } if sampling < 1 || sampling > 100 { return ErrBadSampling } if !IsValidPublishSubject(results) { return ErrBadPublishSubject } // Don't loop back on outselves. if a.IsExportService(results) { return ErrBadPublishSubject } if a.srv != nil && !a.srv.EventsEnabled() { return ErrNoSysAccount } a.mu.Lock() if a.exports.services == nil { a.mu.Unlock() return ErrMissingService } ea, ok := a.exports.services[service] if !ok { a.mu.Unlock() return ErrMissingService } if ea == nil { ea = &serviceExport{} a.exports.services[service] = ea } else if ea.respType != Singleton { a.mu.Unlock() return ErrBadServiceType } ea.latency = &serviceLatency{ sampling: int8(sampling), subject: results, } s := a.srv a.mu.Unlock() if s == nil { return nil } // Now track down the imports and add in latency as needed to enable. 
s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name && subjectIsSubsetMatch(im.to, service) { im.latency = ea.latency } } acc.mu.Unlock() return true }) return nil } // UnTrackServiceExport will disable latency tracking of the named service. func (a *Account) UnTrackServiceExport(service string) { if a == nil || (a.srv != nil && !a.srv.EventsEnabled()) { return } a.mu.Lock() if a == nil || a.exports.services == nil { a.mu.Unlock() return } ea, ok := a.exports.services[service] if !ok || ea == nil || ea.latency == nil { a.mu.Unlock() return } // We have latency here. ea.latency = nil s := a.srv a.mu.Unlock() if s == nil { return } // Now track down the imports and clean them up. s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name { if subjectIsSubsetMatch(im.to, service) { im.latency, im.m1 = nil, nil } } } acc.mu.Unlock() return true }) } // IsExportService will indicate if this service exists. Will check wildcard scenarios. func (a *Account) IsExportService(service string) bool { a.mu.RLock() defer a.mu.RUnlock() _, ok := a.exports.services[service] if ok { return true } tokens := strings.Split(service, tsep) for subj := range a.exports.services { if isSubsetMatch(tokens, subj) { return true } } return false } // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() ea, ok := a.exports.services[service] if ok && ea == nil { a.mu.RUnlock() return false } if ok && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for latency tracking. tokens := strings.Split(service, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } } a.mu.RUnlock() return false } // NATSLatency represents the internal NATS latencies, including RTTs to clients. type NATSLatency struct { Requestor time.Duration `json:"req"` Responder time.Duration `json:"resp"` System time.Duration `json:"sys"` } // TotalTime is a helper function that totals the NATS latencies. func (nl *NATSLatency) TotalTime() time.Duration { return nl.Requestor + nl.Responder + nl.System } // ServiceLatency is the JSON message sent out in response to latency tracking for // exported services. type ServiceLatency struct { Status int `json:"status"` Error string `json:"description,omitempty"` AppName string `json:"app,omitempty"` RequestStart time.Time `json:"start"` ServiceLatency time.Duration `json:"svc"` NATSLatency NATSLatency `json:"nats"` TotalLatency time.Duration `json:"total"` } // Merge function to merge m1 and m2 (requestor and responder) measurements // when there are two samples. This happens when the requestor and responder // are on different servers. // // m2 ServiceLatency is correct, so use that. // m1 TotalLatency is correct, so use that. // Will use those to back into NATS latency. 
func (m1 *ServiceLatency) merge(m2 *ServiceLatency) { m1.AppName = m2.AppName m1.NATSLatency.System = m1.ServiceLatency - (m2.ServiceLatency + m2.NATSLatency.Responder) m1.ServiceLatency = m2.ServiceLatency m1.NATSLatency.Responder = m2.NATSLatency.Responder sanitizeLatencyMetric(m1) } // sanitizeLatencyMetric adjusts latency metric values that could go // negative in some edge conditions since we estimate client RTT // for both requestor and responder. // These numbers are never meant to be negative, it just could be // how we back into the values based on estimated RTT. func sanitizeLatencyMetric(sl *ServiceLatency) { if sl.ServiceLatency < 0 { sl.ServiceLatency = 0 } if sl.NATSLatency.System < 0 { sl.NATSLatency.System = 0 } } // Used for transporting remote latency measurements. type remoteLatency struct { Account string `json:"account"` ReqId string `json:"req_id"` M2 ServiceLatency `json:"m2"` respThresh time.Duration } // sendLatencyResult will send a latency result and clear the si of the requestor(rc). func (a *Account) sendLatencyResult(si *serviceImport, sl *ServiceLatency) { si.acc.mu.Lock() a.srv.sendInternalAccountMsg(a, si.latency.subject, sl) si.rc = nil si.acc.mu.Unlock() } // Used to send a bad request metric when we do not have a reply subject func (a *Account) sendBadRequestTrackingLatency(si *serviceImport, requestor *client) { sl := &ServiceLatency{ Status: 400, Error: "Bad Request", RequestStart: time.Now().Add(-requestor.getRTTValue()).UTC(), } a.sendLatencyResult(si, sl) } // Used to send a latency result when the requestor interest was lost before the // response could be delivered. func (a *Account) sendReplyInterestLostTrackLatency(si *serviceImport) { var reqClientRTT time.Duration if si.rc != nil { reqClientRTT = si.rc.getRTTValue() } reqStart := time.Unix(0, si.ts-int64(reqClientRTT)) sl := &ServiceLatency{ Status: 408, Error: "Request Timeout", RequestStart: reqStart.UTC(), NATSLatency: NATSLatency{ Requestor: reqClientRTT, }, } a.sendLatencyResult(si, sl) } func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiReason) { var reqClientRTT time.Duration if si.rc != nil { reqClientRTT = si.rc.getRTTValue() } reqStart := time.Unix(0, si.ts-int64(reqClientRTT)) sl := &ServiceLatency{ RequestStart: reqStart.UTC(), NATSLatency: NATSLatency{ Requestor: reqClientRTT, }, } if reason == rsiNoDelivery { sl.Status = 503 sl.Error = "Service Unavailable" } else if reason == rsiTimeout { sl.Status = 504 sl.Error = "Service Timeout" } a.sendLatencyResult(si, sl) } // sendTrackingMessage will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool { if si.rc == nil { return true } ts := time.Now() serviceRTT := time.Duration(ts.UnixNano() - si.ts) var requestor = si.rc var reqClientRTT = requestor.getRTTValue() var respClientRTT time.Duration var appName string if responder != nil && responder.kind == CLIENT { respClientRTT = responder.getRTTValue() appName = responder.GetName() } // We will estimate time when request left the requestor by time we received // and the client RTT for the requestor. 
reqStart := time.Unix(0, si.ts-int64(reqClientRTT)) sl := &ServiceLatency{ Status: 200, AppName: appName, RequestStart: reqStart.UTC(), ServiceLatency: serviceRTT - respClientRTT, NATSLatency: NATSLatency{ Requestor: reqClientRTT, Responder: respClientRTT, System: 0, }, TotalLatency: reqClientRTT + serviceRTT, } if respClientRTT > 0 { sl.NATSLatency.System = time.Since(ts) sl.TotalLatency += sl.NATSLatency.System } sanitizeLatencyMetric(sl) // If we are expecting a remote measurement, store our sl here. // We need to account for the race between this and us receiving the // remote measurement. // FIXME(dlc) - We need to clean these up but this should happen // already with the auto-expire logic. if responder != nil && responder.kind != CLIENT { si.acc.mu.Lock() if si.m1 != nil { m1, m2 := sl, si.m1 m1.merge(m2) si.acc.mu.Unlock() a.srv.sendInternalAccountMsg(a, si.latency.subject, m1) si.rc = nil return true } si.m1 = sl si.acc.mu.Unlock() return false } else { a.srv.sendInternalAccountMsg(a, si.latency.subject, sl) si.rc = nil } return true } // This will check to make sure our response lower threshold is set // properly in any clients doing rrTracking. // Lock should be held. func (a *Account) updateAllClientsServiceExportResponseTime(lrt time.Duration) { for _, c := range a.clients { c.mu.Lock() if c.rrTracking != nil && lrt != c.rrTracking.lrt { c.rrTracking.lrt = lrt if c.rrTracking.ptmr.Stop() { c.rrTracking.ptmr.Reset(lrt) } } c.mu.Unlock() } } // Will select the lowest respThresh from all service exports. // Read lock should be held. func (a *Account) lowestServiceExportResponseTime() time.Duration { // Lowest we will allow is 5 minutes. Its an upper bound for this function. lrt := time.Duration(5 * time.Minute) for _, se := range a.exports.services { if se.respThresh < lrt { lrt = se.respThresh } } return lrt } // AddServiceImportWithClaim will add in the service import via the jwt claim. func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error { if destination == nil { return ErrMissingAccount } // Empty means use from. Also means we can use wildcards since we are not doing remapping. if !IsValidSubject(from) || (to != "" && (!IsValidLiteralSubject(from) || !IsValidLiteralSubject(to))) { return ErrInvalidSubject } // Empty means use from. if to == "" { to = from } // First check to see if the account has authorized us to route to the "to" subject. if !destination.checkServiceImportAuthorized(a, to, imClaim) { return ErrServiceImportAuthorization } _, err := a.addServiceImport(destination, from, to, imClaim) return err } // AddServiceImport will add a route to an account to send published messages / requests // to the destination account. From is the local subject to map, To is the // subject that will appear on the destination account. Destination will need // to have an import rule to allow access via addService. func (a *Account) AddServiceImport(destination *Account, from, to string) error { return a.AddServiceImportWithClaim(destination, from, to, nil) } // NumPendingReverseResponses returns the number of response mappings we have for all outstanding // requests for service imports. func (a *Account) NumPendingReverseResponses() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.rrMap) } // NumPendingAllResponses return the number of all responses outstanding for service exports. 
func (a *Account) NumPendingAllResponses() int { return a.NumPendingResponses("") } // NumResponsesPending returns the number of responses outstanding for service exports // on this account. An empty filter string returns all responses regardless of which export. // If you specify the filter we will only return ones that are for that export. // NOTE this is only for what this server is tracking. func (a *Account) NumPendingResponses(filter string) int { a.mu.RLock() defer a.mu.RUnlock() if filter == "" { return len(a.exports.responses) } se := a.getServiceExport(filter) if se == nil { return 0 } var nre int for _, si := range a.exports.responses { if si.se == se { nre++ } } return nre } // NumServiceImports returns the number of service imports we have configured. func (a *Account) NumServiceImports() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.services) } // Reason why we are removing this response serviceImport. type rsiReason int const ( rsiOk = rsiReason(iota) rsiNoDelivery rsiTimeout ) // removeRespServiceImport removes a response si mapping and the reverse entries for interest detection. func (a *Account) removeRespServiceImport(si *serviceImport, reason rsiReason) { if si == nil { return } a.mu.Lock() delete(a.exports.responses, si.from) dest := si.acc to := si.to if si.tracking && si.rc != nil { a.sendBackendErrorTrackingLatency(si, reason) } a.mu.Unlock() dest.checkForReverseEntry(to, si, false) } // removeServiceImport will remove the route by subject. func (a *Account) removeServiceImport(subject string) { a.mu.Lock() si, ok := a.imports.services[subject] delete(a.imports.services, subject) var sid []byte c := a.ic if ok && si != nil { if a.ic != nil && si.sub != nil && si.sub.sid != nil { sid = si.sub.sid } } a.mu.Unlock() if sid != nil { c.processUnsub(sid) } } // This tracks responses to service requests mappings. This is used for cleanup. func (a *Account) addReverseRespMapEntry(acc *Account, reply, from string) { a.mu.Lock() if a.imports.rrMap == nil { a.imports.rrMap = make(map[string][]*serviceRespEntry) } sre := &serviceRespEntry{acc, from} sra := a.imports.rrMap[reply] a.imports.rrMap[reply] = append(sra, sre) a.mu.Unlock() } // checkForReverseEntries is for when we are trying to match reverse entries to a wildcard. // This will be called from checkForReverseEntry when the reply arg is a wildcard subject. // This will usually be called in a go routine since we need to walk all the entries. func (a *Account) checkForReverseEntries(reply string, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectIsLiteral(reply) { a.mu.RUnlock() a.checkForReverseEntry(reply, nil, checkInterest) return } var _rs [32]string rs := _rs[:0] for k := range a.imports.rrMap { if subjectIsSubsetMatch(k, reply) { rs = append(rs, k) } } a.mu.RUnlock() for _, reply := range rs { a.checkForReverseEntry(reply, nil, checkInterest) } } // This checks for any response map entries. If you specify an si we will only match and // clean up for that one, otherwise we remove them all. func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectHasWildcard(reply) { a.mu.RUnlock() go a.checkForReverseEntries(reply, checkInterest) return } sres := a.imports.rrMap[reply] if sres == nil { a.mu.RUnlock() return } // If we are here we have an entry we should check. 
// If requested we will first check if there is any // interest for this subject for the entire account. // If there is we can not delete any entries yet. // Note that if we are here reply has to be a literal subject. if checkInterest { rr := a.sl.Match(reply) // If interest still exists we can not clean these up yet. if len(rr.psubs)+len(rr.qsubs) > 0 { a.mu.RUnlock() return } } a.mu.RUnlock() // Delete the appropriate entries here based on optional si. a.mu.Lock() if si == nil { delete(a.imports.rrMap, reply) } else { // Find the one we are looking for.. for i, sre := range sres { if sre.msub == si.from { sres = append(sres[:i], sres[i+1:]...) break } } if len(sres) > 0 { a.imports.rrMap[si.to] = sres } else { delete(a.imports.rrMap, si.to) } } a.mu.Unlock() // If we are here we no longer have interest and we have // response entries that we should clean up. if si == nil { for _, sre := range sres { acc := sre.acc var trackingCleanup bool var rsi *serviceImport acc.mu.Lock() if rsi = acc.exports.responses[sre.msub]; rsi != nil { delete(acc.exports.responses, rsi.from) trackingCleanup = rsi.tracking && rsi.rc != nil } acc.mu.Unlock() if trackingCleanup { acc.sendReplyInterestLostTrackLatency(rsi) } } } } // Add a service import. // This does no checks and should only be called by the msg processing code. Use // AddServiceImport from above if responding to user input or config changes, etc. func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) (*serviceImport, error) { rt := Singleton var lat *serviceLatency dest.mu.Lock() se := dest.getServiceExport(to) if se != nil { rt = se.respType lat = se.latency } dest.mu.Unlock() a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } else if dup := a.imports.services[from]; dup != nil { a.mu.Unlock() return nil, fmt.Errorf("duplicate service import subject %q, previously used in import for account %q, subject %q", from, dup.acc.Name, dup.to) } if to == "" { to = from } hasWC := subjectHasWildcard(from) si := &serviceImport{dest, claim, se, nil, from, to, "", 0, rt, lat, nil, nil, hasWC, false, false, false} a.imports.services[from] = si a.mu.Unlock() if err := a.addServiceImportSub(si); err != nil { a.removeServiceImport(si.from) return nil, err } return si, nil } // Returns the internal client, will create one if not present. // Lock should be held. func (a *Account) internalClient() *client { if a.ic == nil && a.srv != nil { a.ic = a.srv.createInternalAccountClient() a.ic.acc = a } return a.ic } // This will add an account subscription that matches the "from" from a service import entry. func (a *Account) addServiceImportSub(si *serviceImport) error { a.mu.Lock() c := a.internalClient() sid := strconv.FormatUint(a.isid+1, 10) a.mu.Unlock() // This will happen in parsing when the account has not been properly setup. if c == nil { return nil } if si.sub != nil { return fmt.Errorf("duplicate call to create subscription for service import") } sub, err := c.processSub([]byte(si.from+" "+sid), true) if err != nil { return err } sub.icb = func(sub *subscription, c *client, subject, reply string, msg []byte) { c.processServiceImport(si, a, msg) } a.mu.Lock() a.isid++ si.sub = sub a.mu.Unlock() return nil } // Remove all the subscriptions associated with service imports. 
func (a *Account) removeAllServiceImportSubs() { a.mu.RLock() var sids [][]byte for _, si := range a.imports.services { if si.sub != nil && si.sub.sid != nil { sids = append(sids, si.sub.sid) si.sub = nil } } c := a.ic a.ic = nil a.mu.RUnlock() if c == nil { return } for _, sid := range sids { c.processUnsub(sid) } c.closeConnection(InternalClient) } // Add in subscriptions for all registered service imports. func (a *Account) addAllServiceImportSubs() { for _, si := range a.imports.services { a.addServiceImportSub(si) } } // Helper to determine when to sample. func shouldSample(l *serviceLatency) bool { if l == nil || l.sampling <= 0 { return false } if l.sampling >= 100 { return true } return rand.Int31n(100) <= int32(l.sampling) } // Used to mimic client like replies. const ( replyPrefix = "_R_." trackSuffix = ".T" replyPrefixLen = len(replyPrefix) baseServerLen = 10 replyLen = 6 minReplyLen = 15 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // This is where all service export responses are handled. func (a *Account) processServiceImportResponse(sub *subscription, c *client, subject, reply string, msg []byte) { a.mu.RLock() if a.expired || len(a.exports.responses) == 0 { a.mu.RUnlock() return } si := a.exports.responses[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() // Send for normal processing. c.processServiceImport(si, a, msg) } // Will create a wildcard subscription to handle interest graph propagation for all // service replies. // Lock should not be held. func (a *Account) createRespWildcard() []byte { a.mu.Lock() if a.prand == nil { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } var b = [baseServerLen]byte{'_', 'R', '_', '.'} rn := a.prand.Int63() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } a.siReply = append(b[:], '.') pre := a.siReply wcsub := append(a.siReply, '>') c := a.internalClient() a.isid += 1 sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // Create subscription and internal callback for all the wildcard response subjects. if sub, _ := c.processSub([]byte(string(wcsub)+" "+sid), false); sub != nil { sub.icb = a.processServiceImportResponse } return pre } // Test whether this is a tracked reply. func isTrackedReply(reply []byte) bool { lreply := len(reply) - 1 return lreply > 3 && reply[lreply-1] == '.' && reply[lreply] == 'T' } // Generate a new service reply from the wildcard prefix. // FIXME(dlc) - probably do not have to use rand here. about 25ns per. func (a *Account) newServiceReply(tracking bool) []byte { a.mu.RLock() replyPre := a.siReply s := a.srv a.mu.RUnlock() if replyPre == nil { replyPre = a.createRespWildcard() } var b [replyLen]byte rn := a.prand.Int63() for i, l := 0, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } // Make sure to copy. reply := make([]byte, 0, len(replyPre)+len(b)) reply = append(reply, replyPre...) reply = append(reply, b[:]...) if tracking && s.sys != nil { // Add in our tracking identifier. This allows the metrics to get back to only // this server without needless SUBS/UNSUBS. reply = append(reply, '.') reply = append(reply, s.sys.shash...) reply = append(reply, '.', 'T') } return reply } // Checks if a serviceImport was created to map responses. func (si *serviceImport) isRespServiceImport() bool { return si != nil && si.response } // Sets the response theshold timer for a service export. 
// Account lock should be held func (se *serviceExport) setResponseThresholdTimer() { if se.rtmr != nil { return // Already set } se.rtmr = time.AfterFunc(se.respThresh, se.checkExpiredResponses) } // Account lock should be held func (se *serviceExport) clearResponseThresholdTimer() bool { if se.rtmr == nil { return true } stopped := se.rtmr.Stop() se.rtmr = nil return stopped } // checkExpiredResponses will check for any pending responses that need to // be cleaned up. func (se *serviceExport) checkExpiredResponses() { acc := se.acc if acc == nil { acc.mu.Lock() se.clearResponseThresholdTimer() acc.mu.Unlock() return } var expired []*serviceImport mints := time.Now().UnixNano() - int64(se.respThresh) // TODO(dlc) - Should we release lock while doing this? Or only do these in batches? // Should we break this up for responses only from this service export? // Responses live on acc directly for fast inbound processsing for the _R_ wildcard. // We could do another indirection at this level but just to get to the service export? var totalResponses int acc.mu.RLock() for _, si := range acc.exports.responses { if si.se == se { totalResponses++ if si.ts <= mints { expired = append(expired, si) } } } acc.mu.RUnlock() for _, si := range expired { acc.removeRespServiceImport(si, rsiTimeout) } // Pull out expired to determine if we have any left for timer. totalResponses -= len(expired) // Redo timer as needed. acc.mu.Lock() if totalResponses > 0 && se.rtmr != nil { se.rtmr.Stop() se.rtmr.Reset(se.respThresh) } else { se.clearResponseThresholdTimer() } acc.mu.Unlock() } // ServiceExportResponseThreshold returns the current threshold. func (a *Account) ServiceExportResponseThreshold(export string) (time.Duration, error) { a.mu.Lock() defer a.mu.Unlock() se := a.getServiceExport(export) if se == nil { return 0, fmt.Errorf("no export defined for %q", export) } return se.respThresh, nil } // SetServiceExportResponseThreshold sets the maximum time the system will a response to be delivered // from a service export responder. func (a *Account) SetServiceExportResponseThreshold(export string, maxTime time.Duration) error { a.mu.Lock() defer a.mu.Unlock() lrt := a.lowestServiceExportResponseTime() se := a.getServiceExport(export) if se == nil { return fmt.Errorf("no export defined for %q", export) } se.respThresh = maxTime if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // This is for internal service import responses. func (a *Account) addRespServiceImport(dest *Account, to string, osi *serviceImport) *serviceImport { tracking := shouldSample(osi.latency) nrr := string(osi.acc.newServiceReply(tracking)) a.mu.Lock() rt := osi.rt // dest is the requestor's account. a is the service responder with the export. se := osi.se // Marked as internal here, that is how we distinguish. si := &serviceImport{dest, nil, se, nil, nrr, to, osi.to, 0, rt, nil, nil, nil, false, true, false, false} if a.exports.responses == nil { a.exports.responses = make(map[string]*serviceImport) } a.exports.responses[nrr] = si // Always grab time and make sure response threshold timer is running. si.ts = time.Now().UnixNano() se.setResponseThresholdTimer() if rt == Singleton && tracking { si.latency = osi.latency si.tracking = true } a.mu.Unlock() // We do not do individual subscriptions here like we do on configured imports. // We have an internal callback for all responses inbound to this account and // will process appropriately there. 
This does not pollute the sublist and the caches. // We do add in the reverse map such that we can detect loss of interest and do proper // cleanup of this si as interest goes away. dest.addReverseRespMapEntry(a, to, nrr) return si } // AddStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } // Check prefix if it exists and make sure its a literal. // Append token separator if not already present. if prefix != "" { // Make sure there are no wildcards here, this prefix needs to be a literal // since it will be prepended to a publish subject. if !subjectIsLiteral(prefix) { return ErrStreamImportBadPrefix } if prefix[len(prefix)-1] != btsep { prefix = prefix + string(btsep) } } a.mu.Lock() if a.isStreamImportDuplicate(account, from) { a.mu.Unlock() return ErrStreamImportDuplicate } a.imports.streams = append(a.imports.streams, &streamImport{account, from, prefix, imClaim, false}) a.mu.Unlock() return nil } // isStreamImportDuplicate checks for duplicate. // Lock should be held. func (a *Account) isStreamImportDuplicate(acc *Account, from string) bool { for _, si := range a.imports.streams { if si.acc == acc && si.from == from { return true } } return false } // AddStreamImport will add in the stream import from a specific account. func (a *Account) AddStreamImport(account *Account, from, prefix string) error { return a.AddStreamImportWithClaim(account, from, prefix, nil) } // IsPublicExport is a placeholder to denote a public export. var IsPublicExport = []*Account(nil) // AddStreamExport will add an export to the account. If accounts is nil // it will signify a public export, meaning anyone can impoort. func (a *Account) AddStreamExport(subject string, accounts []*Account) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.streams == nil { a.exports.streams = make(map[string]*streamExport) } ea := a.exports.streams[subject] if accounts != nil { if ea == nil { ea = &streamExport{} } // empty means auth required but will be import token. if len(accounts) == 0 { ea.tokenReq = true } else { if ea.approved == nil { ea.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { ea.approved[acc.Name] = acc } } } a.exports.streams[subject] = ea return nil } // Check if another account is authorized to import from us. func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the exports list. 
a.mu.RLock() auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return auth } func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { if a.exports.streams == nil || !IsValidSubject(subject) { return false } return a.checkStreamExportApproved(account, subject, imClaim) } func (a *Account) checkAuth(ea *exportAuth, account *Account, imClaim *jwt.Import) bool { // if ea is nil or ea.approved is nil, that denotes a public export if ea == nil || (ea.approved == nil && !ea.tokenReq) { return true } // Check if token required if ea.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := ea.approved[account.Name] return ok } func (a *Account) checkStreamExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first ea, ok := a.exports.streams[subject] if ok { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim) } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, ea := range a.exports.streams { if isSubsetMatch(tokens, subj) { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim) } } return false } func (a *Account) checkServiceExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first se, ok := a.exports.services[subject] if ok { // if se is nil or eq.approved is nil, that denotes a public export if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := se.approved[account.Name] return ok } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } _, ok := se.approved[account.Name] return ok } } return false } // Helper function to get a serviceExport. // Lock should be held on entry. func (a *Account) getServiceExport(subj string) *serviceExport { se, ok := a.exports.services[subj] // The export probably has a wildcard, so lookup that up. if !ok { se = a.getWildcardServiceExport(subj) } return se } // This helper is used when trying to match a serviceExport record that is // represented by a wildcard. // Lock should be held on entry. func (a *Account) getWildcardServiceExport(from string) *serviceExport { tokens := strings.Split(from, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { return se } } return nil } // Will fetch the activation token for an import. func fetchActivation(url string) string { // FIXME(dlc) - Make configurable. 
	c := &http.Client{Timeout: 2 * time.Second}
	resp, err := c.Get(url)
	if err != nil || resp == nil {
		return ""
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return ""
	}
	return string(body)
}

// These are import stream specific versions for when an activation expires.
func (a *Account) streamActivationExpired(exportAcc *Account, subject string) {
	a.mu.RLock()
	if a.expired || a.imports.streams == nil {
		a.mu.RUnlock()
		return
	}
	var si *streamImport
	for _, si = range a.imports.streams {
		if si.acc == exportAcc && si.from == subject {
			break
		}
	}
	if si == nil || si.invalid {
		a.mu.RUnlock()
		return
	}
	a.mu.RUnlock()

	if si.acc.checkActivation(a, si.claim, false) {
		// The token has been updated most likely and we are good to go.
		return
	}

	a.mu.Lock()
	si.invalid = true
	clients := make([]*client, 0, len(a.clients))
	for _, c := range a.clients {
		clients = append(clients, c)
	}
	awcsti := map[string]struct{}{a.Name: {}}
	a.mu.Unlock()
	for _, c := range clients {
		c.processSubsOnConfigReload(awcsti)
	}
}

// These are import service specific versions for when an activation expires.
func (a *Account) serviceActivationExpired(subject string) {
	a.mu.RLock()
	if a.expired || a.imports.services == nil {
		a.mu.RUnlock()
		return
	}
	si := a.imports.services[subject]
	if si == nil || si.invalid {
		a.mu.RUnlock()
		return
	}
	a.mu.RUnlock()

	if si.acc.checkActivation(a, si.claim, false) {
		// The token has been updated most likely and we are good to go.
		return
	}

	a.mu.Lock()
	si.invalid = true
	a.mu.Unlock()
}

// Fires for expired activation tokens. We could track this with timers etc.
// Instead we just re-analyze where we are and if we need to act.
func (a *Account) activationExpired(exportAcc *Account, subject string, kind jwt.ExportType) {
	switch kind {
	case jwt.Stream:
		a.streamActivationExpired(exportAcc, subject)
	case jwt.Service:
		a.serviceActivationExpired(subject)
	}
}

// checkActivation will check the activation token for validity.
func (a *Account) checkActivation(importAcc *Account, claim *jwt.Import, expTimer bool) bool {
	if claim == nil || claim.Token == "" {
		return false
	}
	// Create a quick clone so we can inline Token JWT.
	clone := *claim

	// We grab the token from a URL by hand here since we need expiration etc.
	if url, err := url.Parse(clone.Token); err == nil && url.Scheme != "" {
		clone.Token = fetchActivation(url.String())
	}
	vr := jwt.CreateValidationResults()
	clone.Validate(a.Name, vr)
	if vr.IsBlocking(true) {
		return false
	}
	act, err := jwt.DecodeActivationClaims(clone.Token)
	if err != nil {
		return false
	}
	vr = jwt.CreateValidationResults()
	act.Validate(vr)
	if vr.IsBlocking(true) {
		return false
	}
	if !a.isIssuerClaimTrusted(act) {
		return false
	}
	if act.Expires != 0 {
		tn := time.Now().Unix()
		if act.Expires <= tn {
			return false
		}
		if expTimer {
			expiresAt := time.Duration(act.Expires - tn)
			time.AfterFunc(expiresAt*time.Second, func() {
				importAcc.activationExpired(a, string(act.ImportSubject), claim.Type)
			})
		}
	}
	// Check for token revocation.
	if a.actsRevoked != nil {
		if t, ok := a.actsRevoked[act.Subject]; ok && t <= time.Now().Unix() {
			return false
		}
	}
	return true
}

// Returns true if the activation claim is trusted. That is, the issuer matches
// the account or is an entry in the signing keys.
func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool {
	// If there is no issuer account, the issuer is the account.
	if claims.IssuerAccount == "" {
		return true
	}
	// If the IssuerAccount is not us, then this is considered an error.
	if a.Name != claims.IssuerAccount {
		if a.srv != nil {
			a.srv.Errorf("Invalid issuer account %q in activation claim (subject: %q - type: %q) for account %q",
				claims.IssuerAccount, claims.Activation.ImportSubject, claims.Activation.ImportType, a.Name)
		}
		return false
	}
	return a.hasIssuerNoLock(claims.Issuer)
}

// Returns true if `a` and `b` stream imports are the same. Note that the
// check is done with the account's name, not the pointer. This is used
// during config reload where we are comparing current and new config
// in which pointers are different.
// No lock is acquired in this function, so it is assumed that the
// import maps are not changed while this executes.
func (a *Account) checkStreamImportsEqual(b *Account) bool {
	if len(a.imports.streams) != len(b.imports.streams) {
		return false
	}
	// Load the b imports into a map indexed by what we are looking for.
	bm := make(map[string]*streamImport, len(b.imports.streams))
	for _, bim := range b.imports.streams {
		bm[bim.acc.Name+bim.from+bim.prefix] = bim
	}
	for _, aim := range a.imports.streams {
		if _, ok := bm[aim.acc.Name+aim.from+aim.prefix]; !ok {
			return false
		}
	}
	return true
}

func (a *Account) checkStreamExportsEqual(b *Account) bool {
	if len(a.exports.streams) != len(b.exports.streams) {
		return false
	}
	for subj, aea := range a.exports.streams {
		bea, ok := b.exports.streams[subj]
		if !ok {
			return false
		}
		if !reflect.DeepEqual(aea, bea) {
			return false
		}
	}
	return true
}

func (a *Account) checkServiceExportsEqual(b *Account) bool {
	if len(a.exports.services) != len(b.exports.services) {
		return false
	}
	for subj, aea := range a.exports.services {
		bea, ok := b.exports.services[subj]
		if !ok {
			return false
		}
		if !reflect.DeepEqual(aea, bea) {
			return false
		}
	}
	return true
}

// Check if another account is authorized to route requests to this service.
func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool {
	a.mu.RLock()
	authorized := a.checkServiceImportAuthorizedNoLock(account, subject, imClaim)
	a.mu.RUnlock()
	return authorized
}

// Check if another account is authorized to route requests to this service.
func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool {
	// Find the subject in the services list.
	if a.exports.services == nil {
		return false
	}
	return a.checkServiceExportApproved(account, subject, imClaim)
}

// IsExpired returns expiration status.
func (a *Account) IsExpired() bool {
	a.mu.RLock()
	exp := a.expired
	a.mu.RUnlock()
	return exp
}

// Called when an account has expired.
func (a *Account) expiredTimeout() {
	// Mark expired first.
	a.mu.Lock()
	a.expired = true
	a.mu.Unlock()

	// Collect the clients and expire them.
	cs := make([]*client, 0, len(a.clients))
	a.mu.RLock()
	for c := range a.clients {
		cs = append(cs, c)
	}
	a.mu.RUnlock()
	for _, c := range cs {
		c.accountAuthExpired()
	}
}

// Sets the expiration timer for an account JWT that has it set.
func (a *Account) setExpirationTimer(d time.Duration) {
	a.etmr = time.AfterFunc(d, a.expiredTimeout)
}

// Lock should be held.
func (a *Account) clearExpirationTimer() bool {
	if a.etmr == nil {
		return true
	}
	stopped := a.etmr.Stop()
	a.etmr = nil
	return stopped
}

// checkUserRevoked will check if a user has been revoked.
func (a *Account) checkUserRevoked(nkey string) bool {
	a.mu.RLock()
	defer a.mu.RUnlock()
	if a.usersRevoked == nil {
		return false
	}
	if t, ok := a.usersRevoked[nkey]; !ok || t > time.Now().Unix() {
		return false
	}
	return true
}

// Check expiration and set the proper state as needed.
func (a *Account) checkExpiration(claims *jwt.ClaimsData) {
	a.mu.Lock()
	defer a.mu.Unlock()

	a.clearExpirationTimer()
	if claims.Expires == 0 {
		a.expired = false
		return
	}
	tn := time.Now().Unix()
	if claims.Expires <= tn {
		a.expired = true
		return
	}
	expiresAt := time.Duration(claims.Expires - tn)
	a.setExpirationTimer(expiresAt * time.Second)
	a.expired = false
}

// hasIssuer returns true if the issuer matches the account
// issuer or it is a signing key for the account.
func (a *Account) hasIssuer(issuer string) bool {
	a.mu.RLock()
	hi := a.hasIssuerNoLock(issuer)
	a.mu.RUnlock()
	return hi
}

// hasIssuerNoLock is the unlocked version of hasIssuer.
func (a *Account) hasIssuerNoLock(issuer string) bool {
	// Same issuer.
	if a.Issuer == issuer {
		return true
	}
	for i := 0; i < len(a.signingKeys); i++ {
		if a.signingKeys[i] == issuer {
			return true
		}
	}
	return false
}

// Returns the loop detection subject used for leafnodes.
func (a *Account) getLDSubject() string {
	a.mu.RLock()
	lds := a.lds
	a.mu.RUnlock()
	return lds
}

// Placeholder for signaling token auth required.
var tokenAuthReq = []*Account{}

func authAccounts(tokenReq bool) []*Account {
	if tokenReq {
		return tokenAuthReq
	}
	return nil
}

// SetAccountResolver will assign the account resolver.
func (s *Server) SetAccountResolver(ar AccountResolver) {
	s.mu.Lock()
	s.accResolver = ar
	s.mu.Unlock()
}

// AccountResolver returns the registered account resolver.
func (s *Server) AccountResolver() AccountResolver {
	s.mu.Lock()
	ar := s.accResolver
	s.mu.Unlock()
	return ar
}

// UpdateAccountClaims will update an existing account with new claims.
// This will replace any exports or imports previously defined.
// Lock MUST NOT be held upon entry.
func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) {
	if a == nil {
		return
	}
	s.Debugf("Updating account claims: %s", a.Name)
	a.checkExpiration(ac.Claims())

	a.mu.Lock()
	// Clone to update, only select certain fields.
	old := &Account{Name: a.Name, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys}

	// Reset exports and imports here.
	// Exports is creating a whole new map.
	a.exports = exportMap{}

	// Imports are checked unlocked in processInbound, so we can't change out the struct here. Need to process inline.
	if a.imports.streams != nil {
		old.imports.streams = a.imports.streams
		a.imports.streams = nil
	}
	if a.imports.services != nil {
		old.imports.services = make(map[string]*serviceImport, len(a.imports.services))
	}
	for k, v := range a.imports.services {
		old.imports.services[k] = v
		delete(a.imports.services, k)
	}
	// Reset any notion of export revocations.
	a.actsRevoked = nil

	// Update account signing keys.
	a.signingKeys = nil
	signersChanged := false
	if len(ac.SigningKeys) > 0 {
		// Ensure we copy the new keys and sort.
		a.signingKeys = append(a.signingKeys, ac.SigningKeys...)
		sort.Strings(a.signingKeys)
	}
	if len(a.signingKeys) != len(old.signingKeys) {
		signersChanged = true
	} else {
		for i := 0; i < len(old.signingKeys); i++ {
			if a.signingKeys[i] != old.signingKeys[i] {
				signersChanged = true
				break
			}
		}
	}
	a.mu.Unlock()

	gatherClients := func() []*client {
		a.mu.RLock()
		clients := make([]*client, 0, len(a.clients))
		for _, c := range a.clients {
			clients = append(clients, c)
		}
		a.mu.RUnlock()
		return clients
	}

	for _, e := range ac.Exports {
		switch e.Type {
		case jwt.Stream:
			s.Debugf("Adding stream export %q for %s", e.Subject, a.Name)
			if err := a.AddStreamExport(string(e.Subject), authAccounts(e.TokenReq)); err != nil {
				s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error())
			}
		case jwt.Service:
			s.Debugf("Adding service export %q for %s", e.Subject, a.Name)
			rt := Singleton
			switch e.ResponseType {
			case jwt.ResponseTypeStream:
				rt = Streamed
			case jwt.ResponseTypeChunked:
				rt = Chunked
			}
			if err := a.AddServiceExportWithResponse(string(e.Subject), rt, authAccounts(e.TokenReq)); err != nil {
				s.Debugf("Error adding service export to account [%s]: %v", a.Name, err)
			}
			if e.Latency != nil {
				if err := a.TrackServiceExportWithSampling(string(e.Subject), string(e.Latency.Results), e.Latency.Sampling); err != nil {
					s.Debugf("Error adding latency tracking for service export to account [%s]: %v", a.Name, err)
				}
			}
		}

		// We will track these at the account level. Should not have any collisions.
		if e.Revocations != nil {
			a.mu.Lock()
			if a.actsRevoked == nil {
				a.actsRevoked = make(map[string]int64)
			}
			for k, t := range e.Revocations {
				a.actsRevoked[k] = t
			}
			a.mu.Unlock()
		}
	}

	for _, i := range ac.Imports {
		acc, err := s.lookupAccount(i.Account)
		if acc == nil || err != nil {
			s.Errorf("Can't locate account [%s] for import of [%v] %s (err=%v)", i.Account, i.Subject, i.Type, err)
			continue
		}
		switch i.Type {
		case jwt.Stream:
			s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To)
			if err := a.AddStreamImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil {
				s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error())
			}
		case jwt.Service:
			// FIXME(dlc) - need to add in respThresh here eventually.
			s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To)
			if err := a.AddServiceImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil {
				s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error())
			}
		}
	}

	// Now let's apply any needed changes from import/export changes.
	if !a.checkStreamImportsEqual(old) {
		awcsti := map[string]struct{}{a.Name: {}}
		for _, c := range gatherClients() {
			c.processSubsOnConfigReload(awcsti)
		}
	}

	// Now check if stream exports have changed.
	if !a.checkStreamExportsEqual(old) || signersChanged {
		clients := map[*client]struct{}{}
		// We need to check all accounts that have an import claim from this account.
		awcsti := map[string]struct{}{}
		s.accounts.Range(func(k, v interface{}) bool {
			acc := v.(*Account)
			// Move to the next if this account is actually account "a".
			if acc.Name == a.Name {
				return true
			}
			// TODO: checkStreamImportAuthorized() stack should not be trying
			// to lock "acc". If we find that to be needed, we will need to
			// rework this to ensure we don't lock acc.
			acc.mu.Lock()
			for _, im := range acc.imports.streams {
				if im != nil && im.acc.Name == a.Name {
					// Check if we are still authorized for an import.
					im.invalid = !a.checkStreamImportAuthorized(acc, im.from, im.claim)
					awcsti[acc.Name] = struct{}{}
					for _, c := range acc.clients {
						clients[c] = struct{}{}
					}
				}
			}
			acc.mu.Unlock()
			return true
		})
		// Now walk clients.
		for c := range clients {
			c.processSubsOnConfigReload(awcsti)
		}
	}

	// Now check if service exports have changed.
	if !a.checkServiceExportsEqual(old) || signersChanged {
		s.accounts.Range(func(k, v interface{}) bool {
			acc := v.(*Account)
			// Move to the next if this account is actually account "a".
			if acc.Name == a.Name {
				return true
			}
			// TODO: checkServiceImportAuthorized() stack should not be trying
			// to lock "acc". If we find that to be needed, we will need to
			// rework this to ensure we don't lock acc.
			acc.mu.Lock()
			for _, si := range acc.imports.services {
				if si != nil && si.acc.Name == a.Name {
					// Check if we are still authorized for an import.
					si.invalid = !a.checkServiceImportAuthorized(acc, si.to, si.claim)
					if si.latency != nil && !si.response {
						// Make sure we should still be tracking latency.
						if se := a.getServiceExport(si.to); se != nil {
							si.latency = se.latency
						}
					}
				}
			}
			acc.mu.Unlock()
			return true
		})
	}

	// Now do limits if they are present.
	a.mu.Lock()
	a.msubs = int32(ac.Limits.Subs)
	a.mpay = int32(ac.Limits.Payload)
	a.mconns = int32(ac.Limits.Conn)
	a.mleafs = int32(ac.Limits.LeafNodeConn)

	// Check for any revocations.
	if len(ac.Revocations) > 0 {
		// We will always replace whatever we had with most current, so no
		// need to look at what we have.
		a.usersRevoked = make(map[string]int64, len(ac.Revocations))
		for pk, t := range ac.Revocations {
			a.usersRevoked[pk] = t
		}
	}
	a.mu.Unlock()

	clients := gatherClients()
	// Sort if we are over the limit.
	if a.MaxTotalConnectionsReached() {
		sort.Slice(clients, func(i, j int) bool {
			return clients[i].start.After(clients[j].start)
		})
	}
	now := time.Now().Unix()
	for i, c := range clients {
		a.mu.RLock()
		exceeded := a.mconns != jwt.NoLimit && i >= int(a.mconns)
		a.mu.RUnlock()
		if exceeded {
			c.maxAccountConnExceeded()
			continue
		}
		c.mu.Lock()
		c.applyAccountLimits()
		// Check for being revoked here. We use the ac claims to avoid
		// the account lock.
		var nkey string
		if c.user != nil {
			nkey = c.user.Nkey
		}
		c.mu.Unlock()

		// Check if we have been revoked.
		if ac.Revocations != nil {
			if t, ok := ac.Revocations[nkey]; ok && now >= t {
				c.sendErrAndDebug("User Authentication Revoked")
				c.closeConnection(Revocation)
				continue
			}
		}
	}

	// Check if the signing keys changed, might have to evict.
	if signersChanged {
		for _, c := range clients {
			c.mu.Lock()
			sk := c.user.SigningKey
			c.mu.Unlock()
			if sk != "" && !a.hasIssuer(sk) {
				c.closeConnection(AuthenticationViolation)
			}
		}
	}
}

// Helper to build an internal account structure from a jwt.AccountClaims.
// Lock MUST NOT be held upon entry.
func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account {
	acc := NewAccount(ac.Subject)
	acc.Issuer = ac.Issuer
	// Set this here since we are placing in s.tmpAccounts below and may be
	// referenced by a route RS+, etc.
	s.setAccountSublist(acc)
	// We don't want to register an account that is in the process of
	// being built, however, to solve circular import dependencies, we
	// need to store it here.
	s.tmpAccounts.Store(ac.Subject, acc)
	s.UpdateAccountClaims(acc, ac)
	return acc
}

// Helper to build internal NKeyUser.
func buildInternalNkeyUser(uc *jwt.UserClaims, acc *Account) *NkeyUser {
	nu := &NkeyUser{Nkey: uc.Subject, Account: acc}
	if uc.IssuerAccount != "" {
		nu.SigningKey = uc.Issuer
	}

	// Now check for permissions.
	var p *Permissions
	if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 {
		if p == nil {
			p = &Permissions{}
		}
		p.Publish = &SubjectPermission{}
		p.Publish.Allow = uc.Pub.Allow
		p.Publish.Deny = uc.Pub.Deny
	}
	if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 {
		if p == nil {
			p = &Permissions{}
		}
		p.Subscribe = &SubjectPermission{}
		p.Subscribe.Allow = uc.Sub.Allow
		p.Subscribe.Deny = uc.Sub.Deny
	}
	if uc.Resp != nil {
		if p == nil {
			p = &Permissions{}
		}
		p.Response = &ResponsePermission{
			MaxMsgs: uc.Resp.MaxMsgs,
			Expires: uc.Resp.Expires,
		}
		validateResponsePermissions(p)
	}
	nu.Permissions = p
	return nu
}

// AccountResolver interface. This is to fetch Account JWTs by public nkeys.
type AccountResolver interface {
	Fetch(name string) (string, error)
	Store(name, jwt string) error
}

// MemAccResolver is a memory only resolver.
// Mostly for testing.
type MemAccResolver struct {
	sm sync.Map
}

// Fetch will fetch the account jwt claims from the internal sync.Map.
func (m *MemAccResolver) Fetch(name string) (string, error) {
	if j, ok := m.sm.Load(name); ok {
		return j.(string), nil
	}
	return _EMPTY_, ErrMissingAccount
}

// Store will store the account jwt claims in the internal sync.Map.
func (m *MemAccResolver) Store(name, jwt string) error {
	m.sm.Store(name, jwt)
	return nil
}

// URLAccResolver implements an http fetcher.
type URLAccResolver struct {
	url string
	c   *http.Client
}

// NewURLAccResolver returns a new resolver for the given base URL.
func NewURLAccResolver(url string) (*URLAccResolver, error) {
	if !strings.HasSuffix(url, "/") {
		url += "/"
	}
	// FIXME(dlc) - Make timeout and others configurable.
	// We create our own transport to amortize TLS.
	tr := &http.Transport{
		MaxIdleConns:    10,
		IdleConnTimeout: 30 * time.Second,
	}
	ur := &URLAccResolver{
		url: url,
		c:   &http.Client{Timeout: 2 * time.Second, Transport: tr},
	}
	return ur, nil
}

// Fetch will fetch the account jwt claims from the base url, appending the
// account name onto the end.
func (ur *URLAccResolver) Fetch(name string) (string, error) {
	url := ur.url + name
	resp, err := ur.c.Get(url)
	if err != nil {
		return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, err)
	} else if resp == nil {
		return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", url)
	} else if resp.StatusCode != http.StatusOK {
		return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, resp.Status)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return _EMPTY_, err
	}
	return string(body), nil
}

// Store is not implemented for the URL resolver.
func (ur *URLAccResolver) Store(name, jwt string) error {
	return fmt.Errorf("Store operation not supported for URL Resolver")
}
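Both `checkExpiration` and `checkActivation` above rely on the same pattern: a JWT carries its expiry as Unix seconds, and the code converts the delta from now into a `time.AfterFunc` delay. A minimal, self-contained sketch of that pattern follows; the names here (`scheduleExpiry`, the callback) are illustrative and not part of the nats-server code.

package main

import (
	"fmt"
	"time"
)

// scheduleExpiry turns a Unix-seconds expiry into a one-shot timer.
// time.Duration(n) is n nanoseconds, hence the multiplication by time.Second.
func scheduleExpiry(expires int64, onExpired func()) *time.Timer {
	tn := time.Now().Unix()
	if expires <= tn {
		onExpired() // already expired, fire immediately
		return nil
	}
	return time.AfterFunc(time.Duration(expires-tn)*time.Second, onExpired)
}

func main() {
	done := make(chan struct{})
	// Pretend the claim expires two seconds from now.
	scheduleExpiry(time.Now().Unix()+2, func() {
		fmt.Println("activation expired, re-checking imports")
		close(done)
	})
	<-done
}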
1
10,305
We have RequestStart, which seems a bit redundant with this one. WDYT?
nats-io-nats-server
go
@@ -1,10 +1,4 @@ -import { - Component, - createElement, - _unmount as unmount, - options, - cloneElement -} from 'preact'; +import { Component, createElement, options, Fragment } from 'preact'; import { removeNode } from '../../src/util'; const oldCatchError = options._catchError;
1
import {
  Component,
  createElement,
  _unmount as unmount,
  options,
  cloneElement
} from 'preact';
import { removeNode } from '../../src/util';

const oldCatchError = options._catchError;
options._catchError = function(error, newVNode, oldVNode) {
  if (error.then && oldVNode) {
    /** @type {import('./internal').Component} */
    let component;
    let vnode = newVNode;

    for (; (vnode = vnode._parent); ) {
      if ((component = vnode._component) && component._childDidSuspend) {
        if (oldVNode) {
          newVNode._dom = oldVNode._dom;
          newVNode._children = oldVNode._children;
        }

        component._childDidSuspend(error);
        return; // Don't call oldCatchError if we found a Suspense
      }
    }
  }

  oldCatchError(error, newVNode, oldVNode);
};

function detachDom(children) {
  for (let i = 0; i < children.length; i++) {
    let child = children[i];
    if (child != null) {
      if (typeof child.type !== 'function' && child._dom) {
        removeNode(child._dom);
      } else if (child._children) {
        detachDom(child._children);
      }
    }
  }
}

// having custom inheritance instead of a class here saves a lot of bytes
export function Suspense(props) {
  // we do not call super here to golf some bytes...
  this._suspensions = [];
  this._fallback = props.fallback;
}

// Things we do here to save some bytes but are not proper JS inheritance:
// - call `new Component()` as the prototype
// - do not set `Suspense.prototype.constructor` to `Suspense`
Suspense.prototype = new Component();

/**
 * @param {Promise} promise The thrown promise
 */
Suspense.prototype._childDidSuspend = function(promise) {
  /** @type {import('./internal').SuspenseComponent} */
  const c = this;
  c._suspensions.push(promise);

  const onSuspensionComplete = () => {
    // From https://twitter.com/Rich_Harris/status/1125850391155965952
    c._suspensions[c._suspensions.indexOf(promise)] =
      c._suspensions[c._suspensions.length - 1];
    c._suspensions.pop();

    if (c._suspensions.length == 0) {
      // If fallback is null, don't try to unmount it
      // `unmount` expects a real VNode, not null values
      if (c._fallback) {
        // Unmount current children (should be fallback)
        unmount(c._fallback);
      }
      c._vnode._dom = null;

      c._vnode._children = c.state._parkedChildren;
      c.setState({ _parkedChildren: null });
    }
  };

  if (c.state._parkedChildren == null) {
    c._fallback = c._fallback && cloneElement(c._fallback);
    c.setState({ _parkedChildren: c._vnode._children });
    detachDom(c._vnode._children);
    c._vnode._children = [];
  }

  promise.then(onSuspensionComplete, onSuspensionComplete);
};

Suspense.prototype.render = function(props, state) {
  return state._parkedChildren ? this._fallback : props.children;
};

export function lazy(loader) {
  let prom;
  let component;
  let error;

  function Lazy(props) {
    if (!prom) {
      prom = loader();
      prom.then(
        exports => {
          component = exports.default;
        },
        e => {
          error = e;
        }
      );
    }

    if (error) {
      throw error;
    }

    if (!component) {
      throw prom;
    }

    return createElement(component, props);
  }

  Lazy.displayName = 'Lazy';
  Lazy._forwarded = true;
  return Lazy;
}
1
14,655
I think we can remove this corresponding export from `preact` now! Double check nothing else is using it though lol
preactjs-preact
js
@@ -78,6 +78,14 @@ ActiveRecord::Schema.define(version: 20140604204910) do t.datetime "updated_at" end + create_table "item_traits", force: true do |t| + t.text "name" + t.text "value" + t.integer "cart_item_id" + t.datetime "created_at" + t.datetime "updated_at" + end + create_table "user_roles", force: true do |t| t.integer "approval_group_id" t.integer "user_id"
1
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.

ActiveRecord::Schema.define(version: 20140604204910) do

  create_table "approval_groups", force: true do |t|
    t.string   "name"
    t.datetime "created_at"
    t.datetime "updated_at"
    t.integer  "cart_id"
  end

  create_table "approval_groups_users", id: false, force: true do |t|
    t.integer "approval_group_id"
    t.integer "user_id"
  end

  create_table "approvals", force: true do |t|
    t.integer  "cart_id"
    t.integer  "user_id"
    t.string   "status"
    t.datetime "created_at"
    t.datetime "updated_at"
    t.string   "role"
  end

  create_table "approver_comments", force: true do |t|
    t.text     "comment_text"
    t.datetime "created_at"
    t.datetime "updated_at"
    t.integer  "user_id"
  end

  create_table "cart_item_traits", force: true do |t|
    t.text     "name"
    t.text     "value"
    t.integer  "cart_item_id"
    t.datetime "created_at"
    t.datetime "updated_at"
  end

  create_table "cart_items", force: true do |t|
    t.string   "vendor"
    t.text     "description"
    t.string   "url"
    t.text     "notes"
    t.integer  "quantity"
    t.text     "details"
    t.string   "part_number"
    t.float    "price"
    t.integer  "cart_id"
    t.datetime "created_at"
    t.datetime "updated_at"
  end

  create_table "carts", force: true do |t|
    t.string   "name"
    t.string   "status"
    t.datetime "created_at"
    t.datetime "updated_at"
    t.integer  "external_id"
  end

  create_table "comments", force: true do |t|
    t.text     "comment_text"
    t.integer  "cart_id"
    t.datetime "created_at"
    t.datetime "updated_at"
  end

  create_table "user_roles", force: true do |t|
    t.integer "approval_group_id"
    t.integer "user_id"
    t.string  "role"
  end

  create_table "users", force: true do |t|
    t.string   "email_address"
    t.string   "first_name"
    t.string   "last_name"
    t.datetime "created_at"
    t.datetime "updated_at"
  end

end
1
11,849
I'm not sure why this would be in here. Were you working off a branch based off of master? These lines were removed in a previous commit because the table is actually called 'cart_item_traits'.
18F-C2
rb
@@ -30,6 +30,8 @@ module Travis bundler: false } + DEFAULT_GITHUB_ENDPOINT = "https://api.github.com" + attr_reader :data def initialize(data, defaults = {})
1
require 'core_ext/hash/deep_merge'
require 'core_ext/hash/deep_symbolize_keys'

# actually, the worker payload can be cleaned up a lot ...

module Travis
  module Build
    class Data
      autoload :Env, 'travis/build/data/env'
      autoload :Var, 'travis/build/data/var'

      DEFAULTS = {
        timeouts: {
          # git_clone: 300,
          # git_fetch_ref: 300,
          # git_submodules: 300,
          # start_service: 60,
          # before_install: 300,
          # install: 600,
          # before_script: 600,
          # script: 1500,
          # after_success: 300,
          # after_failure: 300,
          # after_script: 300
        }
      }

      DEFAULT_CACHES = {
        apt: false,
        bundler: false
      }

      attr_reader :data

      def initialize(data, defaults = {})
        data = data.deep_symbolize_keys
        defaults = defaults.deep_symbolize_keys
        @data = DEFAULTS.deep_merge(defaults.deep_merge(data))
      end

      def urls
        data[:urls] || {}
      end

      def timeouts
        data[:timeouts] || {}
      end

      def config
        data[:config]
      end

      def hosts
        data[:hosts] || {}
      end

      def skip_resolv_updates?
        !!data[:skip_resolv_updates]
      end

      def cache_options
        data[:cache_options] || {}
      end

      def cache(input = config[:cache])
        case input
        when Hash           then input
        when Array          then input.map { |e| cache(e) }.inject(:merge)
        when String, Symbol then { input.to_sym => true }
        when nil            then {} # for ruby 1.9
        when false          then Hash[DEFAULT_CACHES.each_key.with_object(false).to_a]
        else input.to_h
        end
      end

      def cache?(type, default = DEFAULT_CACHES[type])
        type &&= type.to_sym
        !!cache.fetch(type) { default }
      end

      def env_vars
        @env_vars ||= Env.new(self).vars
      end

      def pull_request
        job[:pull_request]
      end

      def secure_env_enabled?
        job[:secure_env_enabled]
      end

      def source_host
        source_url =~ %r(^(?:https?|git)(?:://|@)([^/]*?)(?:/|:)) && $1
      end

      def source_url
        repository[:source_url]
      end

      def slug
        repository[:slug]
      end

      def commit
        job[:commit]
      end

      def branch
        job[:branch]
      end

      def ref
        job[:ref]
      end

      def job
        data[:job] || {}
      end

      def build
        data[:source] || data[:build] || {} # TODO standardize the payload on :build
      end

      def repository
        data[:repository] || {}
      end

      def token
        data[:oauth_token]
      end
    end
  end
end
1
10,970
Maybe `DEFAULT_GITHUB_API_ENDPOINT` would be a better name, since we refer to this as an API endpoint elsewhere?
travis-ci-travis-build
rb
@@ -45,7 +45,9 @@ const ( capabilityPrefix = "com.amazonaws.ecs.capability." capabilityTaskIAMRole = "task-iam-role" capabilityTaskIAMRoleNetHost = "task-iam-role-network-host" + capabilityTaskCPUMemLimit = "task-cpu-mem-limit" labelPrefix = "com.amazonaws.ecs." + attributePrefix = "ecs.capability." ) // DockerTaskEngine is a state machine for managing a task and its containers
1
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
//     http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package engine contains the core logic for managing tasks
package engine

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/aws/amazon-ecs-agent/agent/api"
	"github.com/aws/amazon-ecs-agent/agent/config"
	"github.com/aws/amazon-ecs-agent/agent/credentials"
	"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
	"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
	"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
	"github.com/aws/amazon-ecs-agent/agent/eventstream"
	"github.com/aws/amazon-ecs-agent/agent/statechange"
	"github.com/aws/amazon-ecs-agent/agent/statemanager"
	"github.com/aws/amazon-ecs-agent/agent/utils"
	utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
	"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
	"github.com/cihub/seelog"
)

const (
	// DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint
	DockerEndpointEnvVariable = "DOCKER_HOST"
	// DockerDefaultEndpoint is the default value for the Docker endpoint
	DockerDefaultEndpoint = "unix:///var/run/docker.sock"

	capabilityPrefix             = "com.amazonaws.ecs.capability."
	capabilityTaskIAMRole        = "task-iam-role"
	capabilityTaskIAMRoleNetHost = "task-iam-role-network-host"
	labelPrefix                  = "com.amazonaws.ecs."
)

// DockerTaskEngine is a state machine for managing a task and its containers
// in ECS.
//
// DockerTaskEngine implements an abstraction over the DockerGoClient so that
// it does not have to know about tasks, only containers
// The DockerTaskEngine interacts with Docker to implement a TaskEngine
type DockerTaskEngine struct {
	// implements TaskEngine

	cfg *config.Config

	initialized  bool
	mustInitLock sync.Mutex

	// state stores all tasks this task engine is aware of, including their
	// current state and mappings to/from dockerId and name.
	// This is used to checkpoint state to disk so tasks may survive agent
	// failures or updates
	state        dockerstate.TaskEngineState
	managedTasks map[string]*managedTask

	taskStopGroup *utilsync.SequentialWaitGroup

	events            <-chan DockerContainerChangeEvent
	stateChangeEvents chan statechange.Event
	saver             statemanager.Saver

	client     DockerClient
	clientLock sync.Mutex

	containerChangeEventStream *eventstream.EventStream

	stopEngine context.CancelFunc

	// processTasks is a mutex that the task engine must acquire before changing
	// any task's state which it manages. Since this is a lock that encompasses
	// all tasks, it must not be held for any significant duration.
	// The write mutex should be taken when adding and removing tasks from managedTasks.
	processTasks sync.RWMutex

	enableConcurrentPull bool
	credentialsManager   credentials.Manager
	_time                ttime.Time
	_timeOnce            sync.Once
	imageManager         ImageManager
}

// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine.
// The distinction between created and initialized is that when created it may
// be serialized/deserialized, but it will not communicate with docker until it
// is also initialized.
func NewDockerTaskEngine(cfg *config.Config, client DockerClient, credentialsManager credentials.Manager, containerChangeEventStream *eventstream.EventStream, imageManager ImageManager, state dockerstate.TaskEngineState) *DockerTaskEngine {
	dockerTaskEngine := &DockerTaskEngine{
		cfg:    cfg,
		client: client,
		saver:  statemanager.NewNoopStateManager(),

		state:         state,
		managedTasks:  make(map[string]*managedTask),
		taskStopGroup: utilsync.NewSequentialWaitGroup(),

		stateChangeEvents: make(chan statechange.Event),

		enableConcurrentPull: false,
		credentialsManager:   credentialsManager,

		containerChangeEventStream: containerChangeEventStream,
		imageManager:               imageManager,
	}

	return dockerTaskEngine
}

// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
var ImagePullDeleteLock sync.RWMutex

// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
	return engine.state.UnmarshalJSON(data)
}

// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
	return engine.state.MarshalJSON()
}

// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
	// TODO, pass in a context from main / background so that other things can stop us, not just the tests
	derivedCtx, cancel := context.WithCancel(ctx)
	engine.stopEngine = cancel

	// Determine whether the engine can perform concurrent "docker pull" based on docker version
	engine.enableConcurrentPull = engine.isParallelPullCompatible()

	// Open the event stream before we sync state so that e.g. if a container
	// goes from running to stopped after we sync with it as "running" we still
	// have the "went to stopped" event pending so we can be up to date.
	err := engine.openEventstream(derivedCtx)
	if err != nil {
		return err
	}
	engine.synchronizeState()
	// Now catch up and start processing new events per normal
	go engine.handleDockerEvents(derivedCtx)
	engine.initialized = true
	return nil
}

// SetDockerClient provides a way to override the client used for communication with docker as a testing hook.
func (engine *DockerTaskEngine) SetDockerClient(client DockerClient) {
	engine.clientLock.Lock()
	defer engine.clientLock.Unlock()
	engine.client = client
}

// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
	if engine.initialized {
		return
	}
	engine.mustInitLock.Lock()
	defer engine.mustInitLock.Unlock()

	errorOnce := sync.Once{}
	taskEngineConnectBackoff := utils.NewSimpleBackoff(200*time.Millisecond, 2*time.Second, 0.20, 1.5)
	utils.RetryWithBackoff(taskEngineConnectBackoff, func() error {
		if engine.initialized {
			return nil
		}
		err := engine.Init(ctx)
		if err != nil {
			errorOnce.Do(func() {
				log.Error("Could not connect to docker daemon", "err", err)
			})
		}
		return err
	})
}

// SetSaver sets the saver that is used by the DockerTaskEngine
func (engine *DockerTaskEngine) SetSaver(saver statemanager.Saver) {
	engine.saver = saver
}

// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
	engine.stopEngine()
	engine.Disable()
}

// Disable prevents this engine from managing any additional tasks.
func (engine *DockerTaskEngine) Disable() {
	engine.processTasks.Lock()
}

// synchronizeState explicitly goes through each docker container stored in
// "state" and updates its KnownStatus appropriately, as well as queueing up
// events to push upstream.
func (engine *DockerTaskEngine) synchronizeState() {
	engine.processTasks.Lock()
	defer engine.processTasks.Unlock()
	imageStates := engine.state.AllImageStates()
	if len(imageStates) != 0 {
		engine.imageManager.AddAllImageStates(imageStates)
	}

	tasks := engine.state.AllTasks()
	for _, task := range tasks {
		conts, ok := engine.state.ContainerMapByArn(task.Arn)
		if !ok {
			engine.startTask(task)
			continue
		}
		for _, cont := range conts {
			if cont.DockerID == "" {
				log.Debug("Found container potentially created while we were down", "name", cont.DockerName)
				// Figure out the dockerid
				describedCont, err := engine.client.InspectContainer(cont.DockerName, inspectContainerTimeout)
				if err != nil {
					log.Warn("Could not find matching container for expected", "name", cont.DockerName)
				} else {
					cont.DockerID = describedCont.ID
					// update mappings that need dockerid
					engine.state.AddContainer(cont, task)
					engine.imageManager.RecordContainerReference(cont.Container)
				}
			}
			if cont.DockerID != "" {
				currentState, metadata := engine.client.DescribeContainer(cont.DockerID)
				if metadata.Error != nil {
					currentState = api.ContainerStopped
					if !cont.Container.KnownTerminal() {
						cont.Container.ApplyingError = api.NewNamedError(&ContainerVanishedError{})
						log.Warn("Could not describe previously known container; assuming dead", "err", metadata.Error, "id", cont.DockerID, "name", cont.DockerName)
						engine.imageManager.RemoveContainerReferenceFromImageState(cont.Container)
					}
				} else {
					engine.imageManager.RecordContainerReference(cont.Container)
				}
				if currentState > cont.Container.GetKnownStatus() {
					cont.Container.SetKnownStatus(currentState)
				}
			}
		}
		engine.startTask(task)
	}
	engine.saver.Save()
}

// CheckTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) CheckTaskState(task *api.Task) {
	taskContainers, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		log.Warn("Could not check task state for task; no task in state", "task", task)
		return
	}
	for _, container := range task.Containers {
		dockerContainer, ok := taskContainers[container.Name]
		if !ok {
			continue
		}
		status, metadata := engine.client.DescribeContainer(dockerContainer.DockerID)
		engine.processTasks.RLock()
		managedTask, ok := engine.managedTasks[task.Arn]
		engine.processTasks.RUnlock()

		if ok {
			managedTask.dockerMessages <- dockerContainerChange{
				container: container,
				event: DockerContainerChangeEvent{
					Status:                  status,
					DockerContainerMetadata: metadata,
				},
			}
		}
	}
}

// sweepTask deletes all the containers associated with a task
func (engine *DockerTaskEngine) sweepTask(task *api.Task) {
	for _, cont := range task.Containers {
		err := engine.removeContainer(task, cont)
		if err != nil {
			log.Debug("Unable to remove old container", "err", err, "task", task, "cont", cont)
		}
		err = engine.imageManager.RemoveContainerReferenceFromImageState(cont)
		if err != nil {
			seelog.Errorf("Error removing container reference from image state: %v", err)
		}
	}
	engine.saver.Save()
}

func (engine *DockerTaskEngine) emitTaskEvent(task *api.Task, reason string) {
	taskKnownStatus := task.GetKnownStatus()
	if !taskKnownStatus.BackendRecognized() {
		return
	}
	if task.GetSentStatus() >= taskKnownStatus {
		log.Debug("Already sent task event; no need to re-send", "task", task.Arn, "event", taskKnownStatus.String())
		return
	}
	event := api.TaskStateChange{
		TaskArn: task.Arn,
		Status:  taskKnownStatus,
		Reason:  reason,
		Task:    task,
	}
	log.Info("Task change event", "event", event)
	engine.stateChangeEvents <- event
}

// startTask creates a managedTask construct to track the task and then begins
// pushing it towards its desired state when allowed. startTask is protected by
// the processTasks lock of 'AddTask'. It should not be called from anywhere
// else and should exit quickly to allow AddTask to do more work.
func (engine *DockerTaskEngine) startTask(task *api.Task) {
	// Create a channel that may be used to communicate with this task, survey
	// what tasks need to be waited on for this one to start, and then spin off
	// a goroutine to oversee this task
	thisTask := engine.newManagedTask(task)
	thisTask._time = engine.time()

	go thisTask.overseeTask()
}

func (engine *DockerTaskEngine) time() ttime.Time {
	engine._timeOnce.Do(func() {
		if engine._time == nil {
			engine._time = &ttime.DefaultTime{}
		}
	})
	return engine._time
}

// emitContainerEvent passes a given event up through the containerEvents channel if necessary.
// It will omit events the backend would not process and will perform best-effort deduplication of events.
func (engine *DockerTaskEngine) emitContainerEvent(task *api.Task, cont *api.Container, reason string) {
	contKnownStatus := cont.GetKnownStatus()
	if !contKnownStatus.BackendRecognized() {
		return
	}
	if cont.IsInternal {
		return
	}
	if cont.GetSentStatus() >= contKnownStatus {
		log.Debug("Already sent container event; no need to re-send", "task", task.Arn, "container", cont.Name, "event", contKnownStatus.String())
		return
	}

	if reason == "" && cont.ApplyingError != nil {
		reason = cont.ApplyingError.Error()
	}
	event := api.ContainerStateChange{
		TaskArn:       task.Arn,
		ContainerName: cont.Name,
		Status:        contKnownStatus,
		ExitCode:      cont.GetKnownExitCode(),
		PortBindings:  cont.KnownPortBindings,
		Reason:        reason,
		Container:     cont,
	}
	log.Debug("Container change event", "event", event)
	engine.stateChangeEvents <- event
	log.Debug("Container change event passed on", "event", event)
}

// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
	events, err := engine.client.ContainerEvents(ctx)
	if err != nil {
		return err
	}
	engine.events = events
	return nil
}

// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case event := <-engine.events:
			ok := engine.handleDockerEvent(event)
			if !ok {
				break
			}
		}
	}
}

// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event DockerContainerChangeEvent) bool {
	log.Debug("Handling a docker event", "event", event)

	task, taskFound := engine.state.TaskByID(event.DockerID)
	cont, containerFound := engine.state.ContainerByID(event.DockerID)
	if !taskFound || !containerFound {
		log.Debug("Event for container not managed", "dockerId", event.DockerID)
		return false
	}

	engine.processTasks.RLock()
	managedTask, ok := engine.managedTasks[task.Arn]
	// hold the lock until the message is sent so we don't send on a closed channel
	defer engine.processTasks.RUnlock()

	if !ok {
		log.Crit("Could not find managed task corresponding to a docker event", "event", event, "task", task)
		return true
	}
	log.Debug("Writing docker event to the associated task", "task", task, "event", event)
	managedTask.dockerMessages <- dockerContainerChange{container: cont.Container, event: event}
	log.Debug("Wrote docker event to the associated task", "task", task, "event", event)
	return true
}

// StateChangeEvents returns channels to read task and container state changes. These
// changes should be read as soon as possible, as failing to read them will block
// processing of the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() <-chan statechange.Event {
	return engine.stateChangeEvents
}

// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *api.Task) error {
	task.PostUnmarshalTask(engine.credentialsManager)

	engine.processTasks.Lock()
	defer engine.processTasks.Unlock()

	existingTask, exists := engine.state.TaskByArn(task.Arn)
	if !exists {
		// This will update the container desired status
		task.UpdateDesiredStatus()

		engine.state.AddTask(task)
		if dependencygraph.ValidDependencies(task) {
			engine.startTask(task)
		} else {
			seelog.Errorf("Unable to progress task with circular dependencies, task: %s", task.String())
			task.SetKnownStatus(api.TaskStopped)
			task.SetDesiredStatus(api.TaskStopped)
			err := TaskDependencyError{task.Arn}
			engine.emitTaskEvent(task, err.Error())
		}
		return nil
	}

	// Update task
	engine.updateTask(existingTask, task)

	return nil
}

type transitionApplyFunc (func(*api.Task, *api.Container) DockerContainerMetadata)

// tryApplyTransition wraps the transitionApplyFunc provided
func tryApplyTransition(task *api.Task, container *api.Container, to api.ContainerStatus, f transitionApplyFunc) DockerContainerMetadata {
	return f(task, container)
}

// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*api.Task, error) {
	return engine.state.AllTasks(), nil
}

// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*api.Task, bool) {
	return engine.state.TaskByArn(arn)
}

func (engine *DockerTaskEngine) pullContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	if engine.enableConcurrentPull {
		seelog.Infof("Pulling container %v concurrently. Task: %v", container, task)
		return engine.concurrentPull(task, container)
	} else {
		seelog.Infof("Pulling container %v serially. Task: %v", container, task)
		return engine.serialPull(task, container)
	}
}

func (engine *DockerTaskEngine) concurrentPull(task *api.Task, container *api.Container) DockerContainerMetadata {
	seelog.Debugf("Attempting to obtain ImagePullDeleteLock to pull image - %s. Task: %v", container.Image, task)
	ImagePullDeleteLock.RLock()
	seelog.Debugf("Acquired ImagePullDeleteLock, start pulling image - %s. Task: %v", container.Image, task)
	defer seelog.Debugf("Released ImagePullDeleteLock after pulling image - %s. Task: %v", container.Image, task)
	defer ImagePullDeleteLock.RUnlock()

	pullStart := time.Now()
	defer func(startTime time.Time) {
		seelog.Infof("Finished pulling container %v in %s. Task: %v", container.Image, time.Since(startTime).String(), task)
	}(pullStart)
	return engine.pullAndUpdateContainerReference(task, container)
}

func (engine *DockerTaskEngine) serialPull(task *api.Task, container *api.Container) DockerContainerMetadata {
	seelog.Debugf("Attempting to obtain ImagePullDeleteLock to pull image - %s. Task: %v", container.Image, task)
	ImagePullDeleteLock.Lock()
	seelog.Debugf("Acquired ImagePullDeleteLock, start pulling image - %s. Task: %v", container.Image, task)
	defer seelog.Debugf("Released ImagePullDeleteLock after pulling image - %s. Task: %v", container.Image, task)
	defer ImagePullDeleteLock.Unlock()

	pullStart := time.Now()
	defer func(startTime time.Time) {
		seelog.Infof("Finished pulling container %v in %s. Task: %v", container.Image, time.Since(startTime).String(), task)
	}(pullStart)
	return engine.pullAndUpdateContainerReference(task, container)
}

func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *api.Task, container *api.Container) DockerContainerMetadata {
	// If a task is blocked here for some time, and before it starts pulling image,
	// the task's desired status is set to stopped, then don't pull the image
	if task.GetDesiredStatus() == api.TaskStopped {
		seelog.Infof("Task desired status is stopped, skip pull container: %v, task %v", container, task)
		container.SetDesiredStatus(api.ContainerStopped)
		return DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
	}

	metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	err := engine.imageManager.RecordContainerReference(container)
	if err != nil {
		seelog.Errorf("Error adding container reference to image state: %v", err)
	}
	imageState := engine.imageManager.GetImageStateFromImageName(container.Image)
	engine.state.AddImageState(imageState)
	engine.saver.Save()
	return metadata
}

func (engine *DockerTaskEngine) createContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Creating container", "task", task, "container", container)
	client := engine.client
	if container.DockerConfig.Version != nil {
		client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
	}

	// Resolve HostConfig
	// we have to do this in create, not start, because docker no longer handles
	// merging create config with start hostconfig the same; e.g. memory limits
	// get lost
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		containerMap = make(map[string]*api.DockerContainer)
	}

	hostConfig, hcerr := task.DockerHostConfig(container, containerMap)
	if hcerr != nil {
		return DockerContainerMetadata{Error: api.NamedError(hcerr)}
	}

	config, err := task.DockerConfig(container)
	if err != nil {
		return DockerContainerMetadata{Error: api.NamedError(err)}
	}

	// Augment labels with some metadata from the agent. Explicitly do this last
	// such that it will always override duplicates in the provided raw config
	// data.
	config.Labels[labelPrefix+"task-arn"] = task.Arn
	config.Labels[labelPrefix+"container-name"] = container.Name
	config.Labels[labelPrefix+"task-definition-family"] = task.Family
	config.Labels[labelPrefix+"task-definition-version"] = task.Version
	config.Labels[labelPrefix+"cluster"] = engine.cfg.Cluster

	name := ""
	for i := 0; i < len(container.Name); i++ {
		c := container.Name[i]
		if !((c <= '9' && c >= '0') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '-')) {
			continue
		}
		name += string(c)
	}

	containerName := "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()

	// Pre-add the container in case we stop before the next, more useful,
	// AddContainer call. This ensures we have a way to get the container if
	// we die before 'createContainer' returns because we can inspect by
	// name
	engine.state.AddContainer(&api.DockerContainer{DockerName: containerName, Container: container}, task)
	seelog.Infof("Created container name mapping for task %s - %s -> %s", task, container, containerName)
	engine.saver.ForceSave()

	metadata := client.CreateContainer(config, hostConfig, containerName, createContainerTimeout)
	if metadata.DockerID != "" {
		engine.state.AddContainer(&api.DockerContainer{DockerID: metadata.DockerID, DockerName: containerName, Container: container}, task)
	}
	seelog.Infof("Created docker container for task %s: %s -> %s", task, container, metadata.DockerID)
	return metadata
}

func (engine *DockerTaskEngine) startContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Starting container", "task", task, "container", container)
	client := engine.client
	if container.DockerConfig.Version != nil {
		client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
	}

	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		return DockerContainerMetadata{
			Error: CannotStartContainerError{fmt.Errorf("Container belongs to unrecognized task %s", task.Arn)},
		}
	}

	dockerContainer, ok := containerMap[container.Name]
	if !ok {
		return DockerContainerMetadata{
			Error: CannotStartContainerError{fmt.Errorf("Container not recorded as created")},
		}
	}
	return client.StartContainer(dockerContainer.DockerID, startContainerTimeout)
}

func (engine *DockerTaskEngine) stopContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Stopping container", "task", task, "container", container)
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		return DockerContainerMetadata{
			Error: CannotStopContainerError{fmt.Errorf("Container belongs to unrecognized task %s", task.Arn)},
		}
	}

	dockerContainer, ok := containerMap[container.Name]
	if !ok {
		return DockerContainerMetadata{
			Error: CannotStopContainerError{fmt.Errorf("Container not recorded as created")},
		}
	}

	return engine.client.StopContainer(dockerContainer.DockerID, stopContainerTimeout)
}

func (engine *DockerTaskEngine) removeContainer(task *api.Task, container *api.Container) error {
	log.Info("Removing container", "task", task, "container", container)
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)

	if !ok {
		return errors.New("No such task: " + task.Arn)
	}

	dockerContainer, ok := containerMap[container.Name]
	if !ok {
		return errors.New("No container named '" + container.Name + "' created in " + task.Arn)
	}

	return engine.client.RemoveContainer(dockerContainer.DockerName, removeContainerTimeout)
}

// updateTask determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the processTasks lock there.
func (engine *DockerTaskEngine) updateTask(task *api.Task, update *api.Task) {
	managedTask, ok := engine.managedTasks[task.Arn]
	if !ok {
		log.Crit("ACS message for a task we thought we managed, but don't! Aborting.", "arn", task.Arn)
		return
	}
	// Keep the lock because sequence numbers cannot be correct unless they are
	// also read in the order addtask was called
	// This does block the engine's ability to ingest any new events (including
	// stops for past tasks, ack!), but this is necessary for correctness
	updateDesiredStatus := update.GetDesiredStatus()
	log.Debug("Putting update on the acs channel", "task", task.Arn, "status", updateDesiredStatus, "seqnum", update.StopSequenceNumber)
	transition := acsTransition{desiredStatus: updateDesiredStatus}
	transition.seqnum = update.StopSequenceNumber
	managedTask.acsMessages <- transition
	log.Debug("Update was taken off the acs channel", "task", task.Arn, "status", updateDesiredStatus)
}

// transitionFunctionMap provides the logic for the simple state machine of the
// DockerTaskEngine. Each desired state maps to a function that can be called
// to try and move the task to that desired state.
func (engine *DockerTaskEngine) transitionFunctionMap() map[api.ContainerStatus]transitionApplyFunc {
	return map[api.ContainerStatus]transitionApplyFunc{
		api.ContainerPulled:  engine.pullContainer,
		api.ContainerCreated: engine.createContainer,
		api.ContainerRunning: engine.startContainer,
		api.ContainerStopped: engine.stopContainer,
	}
}

// applyContainerState moves the container to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (engine *DockerTaskEngine) applyContainerState(task *api.Task, container *api.Container, nextState api.ContainerStatus) DockerContainerMetadata {
	clog := log.New("task", task, "container", container)
	transitionFunction, ok := engine.transitionFunctionMap()[nextState]
	if !ok {
		clog.Crit("Container desired to transition to an unsupported state", "state", nextState.String())
		return DockerContainerMetadata{Error: &impossibleTransitionError{nextState}}
	}

	metadata := tryApplyTransition(task, container, nextState, transitionFunction)
	if metadata.Error != nil {
		clog.Info("Error transitioning container", "state", nextState.String(), "error", metadata.Error)
	} else {
		clog.Debug("Transitioned container", "state", nextState.String())
		engine.saver.Save()
	}
	return metadata
}

// transitionContainer calls applyContainerState, and then notifies the managed
// task of the change. transitionContainer is called by progressContainers and
// by handleStoppedToRunningContainerTransition.
func (engine *DockerTaskEngine) transitionContainer(task *api.Task, container *api.Container, to api.ContainerStatus) {
	// Let docker events operate async so that we can continue to handle ACS / other requests
	// This is safe because 'applyContainerState' will not mutate the task
	metadata := engine.applyContainerState(task, container, to)

	engine.processTasks.RLock()
	managedTask, ok := engine.managedTasks[task.Arn]
	if ok {
		managedTask.dockerMessages <- dockerContainerChange{
			container: container,
			event: DockerContainerChangeEvent{
				Status:                  to,
				DockerContainerMetadata: metadata,
			},
		}
	}
	engine.processTasks.RUnlock()
}

// State is a function primarily meant for testing usage; it is explicitly not
// part of the TaskEngine interface and should not be relied upon.
// It returns an internal representation of the state of this DockerTaskEngine.
func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState {
	return engine.state
}

// Capabilities returns the supported capabilities of this agent / docker-client pair.
// Currently, the following capabilities are possible:
//
//    com.amazonaws.ecs.capability.privileged-container
//    com.amazonaws.ecs.capability.docker-remote-api.1.17
//    com.amazonaws.ecs.capability.docker-remote-api.1.18
//    com.amazonaws.ecs.capability.docker-remote-api.1.19
//    com.amazonaws.ecs.capability.docker-remote-api.1.20
//    com.amazonaws.ecs.capability.logging-driver.json-file
//    com.amazonaws.ecs.capability.logging-driver.syslog
//    com.amazonaws.ecs.capability.logging-driver.fluentd
//    com.amazonaws.ecs.capability.logging-driver.journald
//    com.amazonaws.ecs.capability.logging-driver.gelf
//    com.amazonaws.ecs.capability.selinux
//    com.amazonaws.ecs.capability.apparmor
//    com.amazonaws.ecs.capability.ecr-auth
//    com.amazonaws.ecs.capability.task-iam-role
//    com.amazonaws.ecs.capability.task-iam-role-network-host
func (engine *DockerTaskEngine) Capabilities() []string {
	capabilities := []string{}
	if !engine.cfg.PrivilegedDisabled {
		capabilities = append(capabilities, capabilityPrefix+"privileged-container")
	}

	supportedVersions := make(map[dockerclient.DockerVersion]bool)
	// Determine API versions to report as supported. Supported versions are also used for capability-enablement, except
	// logging drivers.
	for _, version := range engine.client.SupportedVersions() {
		capabilities = append(capabilities, capabilityPrefix+"docker-remote-api."+string(version))
		supportedVersions[version] = true
	}

	knownVersions := make(map[dockerclient.DockerVersion]struct{})
	// Determine known API versions. Known versions are used exclusively for logging-driver enablement, since none of
	// the structural API elements change.
	for _, version := range engine.client.KnownVersions() {
		knownVersions[version] = struct{}{}
	}

	for _, loggingDriver := range engine.cfg.AvailableLoggingDrivers {
		requiredVersion := dockerclient.LoggingDriverMinimumVersion[loggingDriver]
		if _, ok := knownVersions[requiredVersion]; ok {
			capabilities = append(capabilities, capabilityPrefix+"logging-driver."+string(loggingDriver))
		}
	}

	if engine.cfg.SELinuxCapable {
		capabilities = append(capabilities, capabilityPrefix+"selinux")
	}
	if engine.cfg.AppArmorCapable {
		capabilities = append(capabilities, capabilityPrefix+"apparmor")
	}

	if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
		capabilities = append(capabilities, capabilityPrefix+"ecr-auth")
	}

	if engine.cfg.TaskIAMRoleEnabled {
		// The "task-iam-role" capability is supported for docker v1.7.x onwards
		// Refer https://github.com/docker/docker/blob/master/docs/reference/api/docker_remote_api.md
		// to lookup the table of docker supportedVersions to API supportedVersions
		if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
			capabilities = append(capabilities, capabilityPrefix+capabilityTaskIAMRole)
		} else {
			seelog.Warn("Task IAM Role not enabled due to unsupported Docker version")
		}
	}

	if engine.cfg.TaskIAMRoleEnabledForNetworkHost {
		// The "task-iam-role-network-host" capability is supported for docker v1.7.x onwards
		if _, ok := supportedVersions[dockerclient.Version_1_19]; ok {
			capabilities = append(capabilities, capabilityPrefix+capabilityTaskIAMRoleNetHost)
		} else {
			seelog.Warn("Task IAM Role for Host Network not enabled due to unsupported Docker version")
		}
	}

	return capabilities
}

// Version returns the underlying docker version.
func (engine *DockerTaskEngine) Version() (string, error) {
	return engine.client.Version()
}

// isParallelPullCompatible checks the docker version and returns true if docker version >= 1.11.1
func (engine *DockerTaskEngine) isParallelPullCompatible() bool {
	version, err := engine.Version()
	if err != nil {
		seelog.Warnf("Failed to get docker version, err %v", err)
		return false
	}

	match, err := utils.Version(version).Matches(">=1.11.1")
	if err != nil {
		seelog.Warnf("Could not compare docker version, err %v", err)
		return false
	}
	if match {
		seelog.Debugf("Docker version: %v, enable concurrent pulling", version)
		return true
	}

	return false
}
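The `concurrentPull` / `serialPull` pair above hinges on `ImagePullDeleteLock` being a `sync.RWMutex`: concurrent pulls share the read side, while image deletion takes the write side so an image cannot be removed while a task is still pulling it. A stand-alone sketch of that locking scheme follows; the helper names (`imageLock`, `pull`, `deleteImage`) are made up for illustration and are not ECS agent code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// imageLock mirrors the ImagePullDeleteLock idea: pulls may run concurrently
// under the read lock, deletes are exclusive under the write lock.
var imageLock sync.RWMutex

func pull(name string, wg *sync.WaitGroup) {
	defer wg.Done()
	imageLock.RLock() // many pulls can hold the read lock at once
	defer imageLock.RUnlock()
	fmt.Println("pulling", name)
	time.Sleep(10 * time.Millisecond)
}

func deleteImage(name string) {
	imageLock.Lock() // waits for all in-flight pulls to finish
	defer imageLock.Unlock()
	fmt.Println("deleting", name)
}

func main() {
	var wg sync.WaitGroup
	for _, img := range []string{"nginx", "redis", "busybox"} {
		wg.Add(1)
		go pull(img, &wg)
	}
	wg.Wait()
	deleteImage("nginx")
}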
1
16,983
If/when you rebase from `dev`, this is going to cause a merge conflict. I'd suggest doing that soon after merging this PR, as the capabilities code has been moved to "agent/app/agent_capabilities.go".
aws-amazon-ecs-agent
go
@@ -40,6 +40,14 @@ func Test_Start(t *testing.T) { assert.Equal(t, "bytecount 1", mockConnection.LastLine) } +func Test_Stop(t *testing.T) { + statsRecorder := fakeStatsRecorder{} + middleware := NewMiddleware(statsRecorder.record, 1*time.Second) + mockConnection := &management.MockConnection{} + middleware.Stop(mockConnection) + assert.Equal(t, "bytecount 0", mockConnection.LastLine) +} + func Test_ConsumeLine(t *testing.T) { var tests = []struct { line string
1
/*
 * Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package bytescount

import (
	"errors"
	"testing"
	"time"

	"github.com/mysteriumnetwork/node/openvpn/management"
	"github.com/stretchr/testify/assert"
)

func Test_Factory(t *testing.T) {
	statsRecorder := fakeStatsRecorder{}
	middleware := NewMiddleware(statsRecorder.record, 1*time.Second)
	assert.NotNil(t, middleware)
}

func Test_Start(t *testing.T) {
	statsRecorder := fakeStatsRecorder{}
	middleware := NewMiddleware(statsRecorder.record, 1*time.Second)
	mockConnection := &management.MockConnection{}
	middleware.Start(mockConnection)
	assert.Equal(t, "bytecount 1", mockConnection.LastLine)
}

func Test_ConsumeLine(t *testing.T) {
	var tests = []struct {
		line                  string
		expectedConsumed      bool
		expectedError         error
		expectedBytesReceived int
		expectedBytesSent     int
	}{
		{">BYTECOUNT:3018,3264", true, nil, 3018, 3264},
		{">BYTECOUNT:0,3264", true, nil, 0, 3264},
		{">BYTECOUNT:3018,", true, errors.New(`strconv.ParseInt: parsing "": invalid syntax`), 0, 0},
		{">BYTECOUNT:,", true, errors.New(`strconv.ParseInt: parsing "": invalid syntax`), 0, 0},
		{"OTHER", false, nil, 0, 0},
		{"BYTECOUNT", false, nil, 0, 0},
		{"BYTECOUNT:", false, nil, 0, 0},
		{"BYTECOUNT:3018,3264", false, nil, 0, 0},
		{">BYTECOUNTT:3018,3264", false, nil, 0, 0},
	}

	for _, test := range tests {
		statsRecorder := &fakeStatsRecorder{}
		middleware := NewMiddleware(statsRecorder.record, 1*time.Second)
		consumed, err := middleware.ConsumeLine(test.line)
		if test.expectedError != nil {
			assert.Error(t, test.expectedError, err.Error(), test.line)
		} else {
			assert.NoError(t, err, test.line)
		}
		assert.Equal(t, test.expectedConsumed, consumed, test.line)
		assert.Equal(t, test.expectedBytesReceived, statsRecorder.LastSessionStats.BytesReceived)
		assert.Equal(t, test.expectedBytesSent, statsRecorder.LastSessionStats.BytesSent)
	}
}
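The table in `Test_ConsumeLine` above exercises one parsing contract: only lines of the exact form ">BYTECOUNT:<received>,<sent>" are consumed, and a malformed number is still consumed but reported as a `strconv.ParseInt` error. The following is an illustrative guess at the shape of that parsing, not the actual middleware; `parseBytecount` is a hypothetical name.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseBytecount consumes only lines starting with ">BYTECOUNT:" and splits
// the remainder into received/sent counters, matching the test table rows.
func parseBytecount(line string) (consumed bool, received, sent int64, err error) {
	const prefix = ">BYTECOUNT:"
	if !strings.HasPrefix(line, prefix) {
		return false, 0, 0, nil // not our line, leave for other middlewares
	}
	parts := strings.SplitN(strings.TrimPrefix(line, prefix), ",", 2)
	if len(parts) != 2 {
		return true, 0, 0, fmt.Errorf("invalid byte count line: %q", line)
	}
	if received, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
		return true, 0, 0, err // consumed, but the number is malformed
	}
	if sent, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return true, 0, 0, err
	}
	return true, received, sent, nil
}

func main() {
	for _, line := range []string{">BYTECOUNT:3018,3264", "OTHER", ">BYTECOUNT:,"} {
		consumed, recv, sent, err := parseBytecount(line)
		fmt.Println(line, "=>", consumed, recv, sent, err)
	}
}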
1
11,731
What is the test case here?
mysteriumnetwork-node
go
@@ -130,6 +130,7 @@ func run(o *Options) error { go flowAggregator.Run(stopCh, &wg) informerFactory.Start(stopCh) + informerFactory.WaitForCacheSync(stopCh) <-stopCh klog.Infof("Stopping flow aggregator")
1
// Copyright 2020 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "hash/fnv" "sync" "time" "github.com/google/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/klog/v2" "antrea.io/antrea/pkg/clusteridentity" aggregator "antrea.io/antrea/pkg/flowaggregator" "antrea.io/antrea/pkg/log" "antrea.io/antrea/pkg/signals" ) const informerDefaultResync = 12 * time.Hour // genObservationDomainID generates an IPFIX Observation Domain ID when one is not provided by the // user through the flow aggregator configuration. It will first try to generate one // deterministically based on the cluster UUID (if available, with a timeout of 10s). Otherwise, it // will generate a random one. The cluster UUID should be available if Antrea is deployed to the // cluster ahead of the flow aggregator, which is the expectation since when deploying flow // aggregator as a Pod, networking needs to be configured by the CNI plugin. func genObservationDomainID(k8sClient kubernetes.Interface) uint32 { const retryInterval = time.Second const timeout = 10 * time.Second const defaultAntreaNamespace = "kube-system" clusterIdentityProvider := clusteridentity.NewClusterIdentityProvider( defaultAntreaNamespace, clusteridentity.DefaultClusterIdentityConfigMapName, k8sClient, ) var clusterUUID uuid.UUID if err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) { clusterIdentity, _, err := clusterIdentityProvider.Get() if err != nil { return false, nil } clusterUUID = clusterIdentity.UUID return true, nil }); err != nil { klog.Warningf( "Unable to retrieve cluster UUID after %v (does ConfigMap '%s/%s' exist?); will generate a random observation domain ID", timeout, defaultAntreaNamespace, clusteridentity.DefaultClusterIdentityConfigMapName, ) clusterUUID = uuid.New() } h := fnv.New32() h.Write(clusterUUID[:]) observationDomainID := h.Sum32() return observationDomainID } func run(o *Options) error { klog.Infof("Flow aggregator starting...") // Set up signal capture: the first SIGTERM / SIGINT signal is handled gracefully and will // cause the stopCh channel to be closed; if another signal is received before the program // exits, we will force exit. 
stopCh := signals.RegisterSignalHandlers() log.StartLogFileNumberMonitor(stopCh) k8sClient, err := createK8sClient() if err != nil { return fmt.Errorf("error when creating K8s client: %v", err) } informerFactory := informers.NewSharedInformerFactory(k8sClient, informerDefaultResync) podInformer := informerFactory.Core().V1().Pods() var observationDomainID uint32 if o.config.ObservationDomainID != nil { observationDomainID = *o.config.ObservationDomainID } else { observationDomainID = genObservationDomainID(k8sClient) } klog.Infof("Flow aggregator Observation Domain ID: %d", observationDomainID) var sendJSONRecord bool if o.format == "JSON" { sendJSONRecord = true } else { sendJSONRecord = false } flowAggregator := aggregator.NewFlowAggregator( o.externalFlowCollectorAddr, o.externalFlowCollectorProto, o.activeFlowRecordTimeout, o.inactiveFlowRecordTimeout, o.aggregatorTransportProtocol, o.flowAggregatorAddress, o.includePodLabels, k8sClient, observationDomainID, podInformer, sendJSONRecord, ) err = flowAggregator.InitCollectingProcess() if err != nil { return fmt.Errorf("error when creating collecting process: %v", err) } err = flowAggregator.InitAggregationProcess() if err != nil { return fmt.Errorf("error when creating aggregation process: %v", err) } var wg sync.WaitGroup wg.Add(1) go flowAggregator.Run(stopCh, &wg) informerFactory.Start(stopCh) <-stopCh klog.Infof("Stopping flow aggregator") wg.Wait() return nil } func createK8sClient() (kubernetes.Interface, error) { config, err := rest.InClusterConfig() if err != nil { return nil, err } k8sClient, err := kubernetes.NewForConfig(config) if err != nil { return nil, err } return k8sClient, nil }
1
46,816
You could try moving this closer to the call where we request label info. We might be doing the check too early; all the resources may not be present in the informer at this point.
antrea-io-antrea
go
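On the placement suggested above: informerFactory.Start only launches the informers and returns immediately, while WaitForCacheSync blocks until each started informer's store has finished its initial list, so Pods that existed before the aggregator came up become visible. A sketch of the reviewer's alternative, waiting right before the first label lookup rather than unconditionally at startup (the helper name is illustrative, not antrea code):

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

// waitForPodCache blocks until the Pod informer cache has completed its
// initial sync. Calling it immediately before the first Pod label lookup
// keeps startup fast and still guarantees the cache is populated.
func waitForPodCache(factory informers.SharedInformerFactory, stopCh <-chan struct{}) error {
	podInformer := factory.Core().V1().Pods().Informer()
	if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
		return fmt.Errorf("timed out waiting for Pod informer cache to sync")
	}
	return nil
}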
@@ -42,7 +42,10 @@ public interface ManifestFile { required(509, "contains_null", Types.BooleanType.get()), optional(510, "lower_bound", Types.BinaryType.get()), // null if no non-null values optional(511, "upper_bound", Types.BinaryType.get()) - )))); + ))), + optional(512, "added_rows_count", Types.LongType.get()), + optional(513, "existing_rows_count", Types.LongType.get()), + optional(514, "deleted_rows_count", Types.LongType.get())); static Schema schema() { return SCHEMA;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.nio.ByteBuffer; import java.util.List; import org.apache.iceberg.types.Types; import static org.apache.iceberg.types.Types.NestedField.optional; import static org.apache.iceberg.types.Types.NestedField.required; /** * Represents a manifest file that can be scanned to find data files in a table. */ public interface ManifestFile { Schema SCHEMA = new Schema( required(500, "manifest_path", Types.StringType.get()), required(501, "manifest_length", Types.LongType.get()), required(502, "partition_spec_id", Types.IntegerType.get()), optional(503, "added_snapshot_id", Types.LongType.get()), optional(504, "added_data_files_count", Types.IntegerType.get()), optional(505, "existing_data_files_count", Types.IntegerType.get()), optional(506, "deleted_data_files_count", Types.IntegerType.get()), optional(507, "partitions", Types.ListType.ofRequired(508, Types.StructType.of( required(509, "contains_null", Types.BooleanType.get()), optional(510, "lower_bound", Types.BinaryType.get()), // null if no non-null values optional(511, "upper_bound", Types.BinaryType.get()) )))); static Schema schema() { return SCHEMA; } /** * @return fully qualified path to the file, suitable for constructing a Hadoop Path */ String path(); /** * @return length of the manifest file */ long length(); /** * @return ID of the {@link PartitionSpec} used to write the manifest file */ int partitionSpecId(); /** * @return ID of the snapshot that added the manifest file to table metadata */ Long snapshotId(); /** * Returns true if the manifest contains ADDED entries or if the count is not known. * * @return whether this manifest contains entries with ADDED status */ default boolean hasAddedFiles() { return addedFilesCount() == null || addedFilesCount() > 0; } /** * @return the number of data files with status ADDED in the manifest file */ Integer addedFilesCount(); /** * Returns true if the manifest contains EXISTING entries or if the count is not known. * * @return whether this manifest contains entries with EXISTING status */ default boolean hasExistingFiles() { return existingFilesCount() == null || existingFilesCount() > 0; } /** * @return the number of data files with status EXISTING in the manifest file */ Integer existingFilesCount(); /** * Returns true if the manifest contains DELETED entries or if the count is not known. * * @return whether this manifest contains entries with DELETED status */ default boolean hasDeletedFiles() { return deletedFilesCount() == null || deletedFilesCount() > 0; } /** * @return the number of data files with status DELETED in the manifest file */ Integer deletedFilesCount(); /** * Returns a list of {@link PartitionFieldSummary partition field summaries}. 
* <p> * Each summary corresponds to a field in the manifest file's partition spec, by ordinal. For * example, the partition spec [ ts_day=date(ts), type=identity(type) ] will have 2 summaries. * The first summary is for the ts_day partition field and the second is for the type partition * field. * * @return a list of partition field summaries, one for each field in the manifest's spec */ List<PartitionFieldSummary> partitions(); /** * Copies this {@link ManifestFile manifest file}. Readers can reuse manifest file instances; use * this method to make defensive copies. * * @return a copy of this manifest file */ ManifestFile copy(); /** * Summarizes the values of one partition field stored in a manifest file. */ interface PartitionFieldSummary { Types.StructType TYPE = ManifestFile.schema() .findType("partitions") .asListType() .elementType() .asStructType(); static Types.StructType getType() { return TYPE; } /** * @return true if at least one data file in the manifest has a null value for the field */ boolean containsNull(); /** * @return a ByteBuffer that contains a serialized bound lower than all values of the field */ ByteBuffer lowerBound(); /** * @return a ByteBuffer that contains a serialized bound higher than all values of the field */ ByteBuffer upperBound(); /** * Copies this {@link PartitionFieldSummary summary}. Readers can reuse instances; use this * method to make defensive copies. * * @return a copy of this partition field summary */ PartitionFieldSummary copy(); } }
1
17,537
Can we add these up by the data file counts?
apache-iceberg
java
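The question above asks whether the three new row counts can be maintained like the neighbouring file counts, i.e. accumulated from the record counts of the data files as the manifest is written. A rough sketch of that bookkeeping, in Go rather than the project's Java, with illustrative names that are not Iceberg API:

package manifest

// rowCounts accumulates manifest-level row totals from individual data
// files, mirroring how added/existing/deleted file counts are tracked.
type rowCounts struct {
	added, existing, deleted int64
}

// addFile folds one data file's record count into the total for its
// manifest entry status.
func (c *rowCounts) addFile(status string, recordCount int64) {
	switch status {
	case "ADDED":
		c.added += recordCount
	case "EXISTING":
		c.existing += recordCount
	case "DELETED":
		c.deleted += recordCount
	}
}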
@@ -30,10 +30,8 @@ const resolve = (list, child, node) => { // callbacks won't get queued in the node anyway. // If revealOrder is 'together' then also do an early exit // if all suspended descendants have not yet been resolved. - if ( - !list.props.revealOrder || - (list.props.revealOrder[0] === 't' && list._map.size) - ) { + const revealOrder = list.props.revealOrder; + if (!revealOrder || (revealOrder[0] === 't' && list._map.size)) { return; }
1
import { Component, toChildArray } from 'preact'; import { suspended } from './suspense.js'; // Indexes to linked list nodes (nodes are stored as arrays to save bytes). const SUSPENDED_COUNT = 0; const RESOLVED_COUNT = 1; const NEXT_NODE = 2; // Having custom inheritance instead of a class here saves a lot of bytes. export function SuspenseList() { this._next = null; this._map = null; } // Mark one of child's earlier suspensions as resolved. // Some pending callbacks may become callable due to this // (e.g. the last suspended descendant gets resolved when // revealOrder === 'together'). Process those callbacks as well. const resolve = (list, child, node) => { if (++node[RESOLVED_COUNT] === node[SUSPENDED_COUNT]) { // The number a child (or any of its descendants) has been suspended // matches the number of times it's been resolved. Therefore we // mark the child as completely resolved by deleting it from ._map. // This is used to figure out when *all* children have been completely // resolved when revealOrder is 'together'. list._map.delete(child); } // If revealOrder is falsy then we can do an early exit, as the // callbacks won't get queued in the node anyway. // If revealOrder is 'together' then also do an early exit // if all suspended descendants have not yet been resolved. if ( !list.props.revealOrder || (list.props.revealOrder[0] === 't' && list._map.size) ) { return; } // Walk the currently suspended children in order, calling their // stored callbacks on the way. Stop if we encounter a child that // has not been completely resolved yet. node = list._next; while (node) { while (node.length > 3) { node.pop()(); } if (node[RESOLVED_COUNT] < node[SUSPENDED_COUNT]) { break; } list._next = node = node[NEXT_NODE]; } }; // Things we do here to save some bytes but are not proper JS inheritance: // - call `new Component()` as the prototype // - do not set `Suspense.prototype.constructor` to `Suspense` SuspenseList.prototype = new Component(); SuspenseList.prototype._suspended = function(child) { const list = this; const delegated = suspended(list._vnode); let node = list._map.get(child); node[SUSPENDED_COUNT]++; return unsuspend => { const wrappedUnsuspend = () => { if (!list.props.revealOrder) { // Special case the undefined (falsy) revealOrder, as there // is no need to coordinate a specific order or unsuspends. unsuspend(); } else { node.push(unsuspend); resolve(list, child, node); } }; if (delegated) { delegated(wrappedUnsuspend); } else { wrappedUnsuspend(); } }; }; SuspenseList.prototype.render = function(props) { this._next = null; this._map = new Map(); const children = toChildArray(props.children); if (props.revealOrder && props.revealOrder[0] === 'b') { // If order === 'backwards' (or, well, anything starting with a 'b') // then flip the child list around so that the last child will be // the first in the linked list. children.reverse(); } // Build the linked list. Iterate through the children in reverse order // so that `_next` points to the first linked list node to be resolved. for (let i = children.length; i--; ) { // Create a new linked list node as an array of form: // [suspended_count, resolved_count, next_node] // where suspended_count and resolved_count are numeric counters for // keeping track how many times a node has been suspended and resolved. // // Note that suspended_count starts from 1 instead of 0, so we can block // processing callbacks until componentDidMount has been called. In a sense // node is suspended at least until componentDidMount gets called! 
// // Pending callbacks are added to the end of the node: // [suspended_count, resolved_count, next_node, callback_0, callback_1, ...] this._map.set(children[i], (this._next = [1, 0, this._next])); } return props.children; }; SuspenseList.prototype.componentDidUpdate = SuspenseList.prototype.componentDidMount = function() { // Iterate through all children after mounting for two reasons: // 1. As each node[SUSPENDED_COUNT] starts from 1, this iteration increases // each node[RELEASED_COUNT] by 1, therefore balancing the counters. // The nodes can now be completely consumed from the linked list. // 2. Handle nodes that might have gotten resolved between render and // componentDidMount. const list = this; list._map.forEach((node, child) => { resolve(list, child, node); }); };
1
15,165
Most of the time assigning won't save bytes unless the value is used 3+ times (var adds 3 bytes)
preactjs-preact
js
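A rough worked version of the byte math behind that comment: minified to a one-letter name, the alias costs about 7 bytes of overhead ("var a=" plus ";") on top of the expression, while each use saves the expression length minus one. For a 22-character chain like list.props.revealOrder even two uses break even on raw bytes, but the library's size budget appears to be measured on the compressed bundle, where repeated substrings are nearly free, which is why the rule of thumb is closer to three or more uses.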
@@ -160,8 +160,8 @@ define(['playbackManager', 'nowPlayingHelper', 'events', 'connectionManager'], f if (navigator.mediaSession) { navigator.mediaSession.metadata = new MediaMetadata({ - title: title, - artist: artist, + title: artist, + artist: title, album: album, artwork: getImageUrls(item), albumArtist: albumArtist,
1
define(['playbackManager', 'nowPlayingHelper', 'events', 'connectionManager'], function (playbackManager, nowPlayingHelper, events, connectionManager) { "use strict"; // no support for mediaSession if (!navigator.mediaSession && !window.NativeShell) { return; } // Reports media playback to the device for lock screen control var currentPlayer; var lastUpdateTime = 0; function seriesImageUrl(item, options) { if (item.Type !== 'Episode') { return null; } options = options || {}; options.type = options.type || "Primary"; if (options.type === 'Primary') { if (item.SeriesPrimaryImageTag) { options.tag = item.SeriesPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } } if (options.type === 'Thumb') { if (item.SeriesThumbImageTag) { options.tag = item.SeriesThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if (item.ParentThumbImageTag) { options.tag = item.ParentThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options); } } return null; } function imageUrl(item, options) { options = options || {}; options.type = options.type || "Primary"; if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.Id, options); } if (item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options); } return null; } function pushImageUrl(item, imageOptions, list) { var url = seriesImageUrl(item, imageOptions) || imageUrl(item, imageOptions); if (url) { var height = imageOptions.height || imageOptions.maxHeight; list.push({ src: url, sizes: height + 'x' + height }); } } function getImageUrls(item) { var list = []; pushImageUrl(item, {height: 96}, list); pushImageUrl(item, {height: 128}, list); pushImageUrl(item, {height: 192}, list); pushImageUrl(item, {height: 256}, list); pushImageUrl(item, {height: 384}, list); pushImageUrl(item, {height: 512}, list); return list; } function updatePlayerState(player, state, eventName) { var item = state.NowPlayingItem; if (!item) { hideMediaControls(); return; } if (eventName == 'init') { // transform "init" event into "timeupdate" to restraint update rate eventName = 'timeupdate'; } var isVideo = item.MediaType === 'Video'; var isLocalPlayer = player.isLocalPlayer || false; // Local players do their own notifications if (isLocalPlayer && isVideo) { return; } var playState = state.PlayState || {}; var parts = nowPlayingHelper.getNowPlayingNames(item); var artist = parts.length === 1 ? '' : parts[0].text; var title = parts[parts.length - 1].text; // Switch these two around for video if (isVideo && parts.length > 1) { var temp = artist; artist = title; title = temp; } var albumArtist; if (item.AlbumArtists && item.AlbumArtists[0]) { albumArtist = item.AlbumArtists[0].Name; } var album = item.Album || ''; var itemId = item.Id; // Convert to ms var duration = parseInt(item.RunTimeTicks ? (item.RunTimeTicks / 10000) : 0); var currentTime = parseInt(playState.PositionTicks ? 
(playState.PositionTicks / 10000) : 0); var isPaused = playState.IsPaused || false; var canSeek = playState.CanSeek || false; var now = new Date().getTime(); // Don't go crazy reporting position changes if (eventName == 'timeupdate' && (now - lastUpdateTime) < 5000) { // Only report if this item hasn't been reported yet, or if there's an actual playback change. // Don't report on simple time updates return; } lastUpdateTime = now; if (navigator.mediaSession) { navigator.mediaSession.metadata = new MediaMetadata({ title: title, artist: artist, album: album, artwork: getImageUrls(item), albumArtist: albumArtist, currentTime: currentTime, duration: duration, paused: isPaused, itemId: itemId, mediaType: item.MediaType }); } else { var imageUrl = []; pushImageUrl(item, {maxHeight: 400}, imageUrl); if (imageUrl.length) { imageUrl = imageUrl[0].src; } else { imageUrl = null; } window.NativeShell.updateMediaSession({ action: eventName, isLocalPlayer: isLocalPlayer, itemId: itemId, title: title, artist: artist, album: album, duration: duration, position: currentTime, imageUrl: imageUrl, canSeek: canSeek, isPaused: isPaused }); } } function onGeneralEvent(e) { var player = this; var state = playbackManager.getPlayerState(player); updatePlayerState(player, state, e.type); } function onStateChanged(e, state) { var player = this; updatePlayerState(player, state, 'statechange'); } function onPlaybackStart(e, state) { var player = this; updatePlayerState(player, state, e.type); } function onPlaybackStopped(e, state) { var player = this; hideMediaControls(); } function releaseCurrentPlayer() { if (currentPlayer) { events.off(currentPlayer, 'playbackstart', onPlaybackStart); events.off(currentPlayer, 'playbackstop', onPlaybackStopped); events.off(currentPlayer, 'unpause', onGeneralEvent); events.off(currentPlayer, 'pause', onGeneralEvent); events.off(currentPlayer, 'statechange', onStateChanged); events.off(currentPlayer, 'timeupdate', onGeneralEvent); currentPlayer = null; hideMediaControls(); } } function hideMediaControls() { lastUpdateTime = 0; if (navigator.mediaSession) { navigator.mediaSession.metadata = null; } else { window.NativeShell.hideMediaSession(); } } function bindToPlayer(player) { releaseCurrentPlayer(); if (!player) { return; } currentPlayer = player; var state = playbackManager.getPlayerState(player); updatePlayerState(player, state, 'init'); events.on(currentPlayer, 'playbackstart', onPlaybackStart); events.on(currentPlayer, 'playbackstop', onPlaybackStopped); events.on(currentPlayer, 'unpause', onGeneralEvent); events.on(currentPlayer, 'pause', onGeneralEvent); events.on(currentPlayer, 'statechange', onStateChanged); events.on(currentPlayer, 'timeupdate', onGeneralEvent); } function execute(name) { playbackManager[name](currentPlayer); } if (navigator.mediaSession) { navigator.mediaSession.setActionHandler('previoustrack', function () { execute('previousTrack'); }); navigator.mediaSession.setActionHandler('nexttrack', function () { execute('nextTrack'); }); navigator.mediaSession.setActionHandler('play', function () { execute('unpause'); }); navigator.mediaSession.setActionHandler('pause', function () { execute('pause'); }); navigator.mediaSession.setActionHandler('seekbackward', function () { execute('rewind'); }); navigator.mediaSession.setActionHandler('seekforward', function () { execute('fastForward'); }); } events.on(playbackManager, 'playerchange', function () { bindToPlayer(playbackManager.getCurrentPlayer()); }); bindToPlayer(playbackManager.getCurrentPlayer()); });
1
12,908
I would rather find the code that inverts the logic and remove that.
jellyfin-jellyfin-web
js
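The inversion referred to above is most likely the `if (isVideo && parts.length > 1)` block visible in the source, which swaps artist and title for video items; removing that swap at its origin would make re-crossing the two fields in the MediaMetadata call unnecessary.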
@@ -142,7 +142,7 @@ class RemoteConnection(object): :Returns: Timeout value in seconds for all http requests made to the Remote Connection """ - return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT or cls._timeout + return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout @classmethod def set_timeout(cls, timeout):
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import socket import string import base64 try: import http.client as httplib from urllib import request as url_request from urllib import parse except ImportError: # above is available in py3+, below is py2.7 import httplib as httplib import urllib2 as url_request import urlparse as parse from .command import Command from .errorhandler import ErrorCode from . import utils LOGGER = logging.getLogger(__name__) class Request(url_request.Request): """ Extends the url_request.Request to support all HTTP request types. """ def __init__(self, url, data=None, method=None): """ Initialise a new HTTP request. :Args: - url - String for the URL to send the request to. - data - Data to send with the request. """ if method is None: method = data is not None and 'POST' or 'GET' elif method != 'POST' and method != 'PUT': data = None self._method = method url_request.Request.__init__(self, url, data=data) def get_method(self): """ Returns the HTTP method used by this request. """ return self._method class Response(object): """ Represents an HTTP response. """ def __init__(self, fp, code, headers, url): """ Initialise a new Response. :Args: - fp - The response body file object. - code - The HTTP status code returned by the server. - headers - A dictionary of headers returned by the server. - url - URL of the retrieved resource represented by this Response. """ self.fp = fp self.read = fp.read self.code = code self.headers = headers self.url = url def close(self): """ Close the response body file object. """ self.read = None self.fp = None def info(self): """ Returns the response headers. """ return self.headers def geturl(self): """ Returns the URL for the resource returned in this response. """ return self.url class HttpErrorHandler(url_request.HTTPDefaultErrorHandler): """ A custom HTTP error handler. Used to return Response objects instead of raising an HTTPError exception. """ def http_error_default(self, req, fp, code, msg, headers): """ Default HTTP error handler. :Args: - req - The original Request object. - fp - The response body file object. - code - The HTTP status code returned by the server. - msg - The HTTP status message returned by the server. - headers - The response headers. :Returns: A new Response object. """ return Response(fp, code, headers, req.get_full_url()) class RemoteConnection(object): """A connection with the Remote WebDriver server. 
Communicates with the server using the WebDriver wire protocol: https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol""" _timeout = socket._GLOBAL_DEFAULT_TIMEOUT @classmethod def get_timeout(cls): """ :Returns: Timeout value in seconds for all http requests made to the Remote Connection """ return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT or cls._timeout @classmethod def set_timeout(cls, timeout): """ Override the default timeout :Args: - timeout - timeout value for http requests in seconds """ cls._timeout = timeout @classmethod def reset_timeout(cls): """ Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT """ cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT def __init__(self, remote_server_addr, keep_alive=False): # Attempt to resolve the hostname and get an IP address. self.keep_alive = keep_alive parsed_url = parse.urlparse(remote_server_addr) addr = "" if parsed_url.hostname: try: netloc = socket.gethostbyname(parsed_url.hostname) addr = netloc if parsed_url.port: netloc += ':%d' % parsed_url.port if parsed_url.username: auth = parsed_url.username if parsed_url.password: auth += ':%s' % parsed_url.password netloc = '%s@%s' % (auth, netloc) remote_server_addr = parse.urlunparse( (parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) except socket.gaierror: LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname) self._url = remote_server_addr if keep_alive: self._conn = httplib.HTTPConnection( str(addr), str(parsed_url.port), timeout=self._timeout) self._commands = { Command.STATUS: ('GET', '/status'), Command.NEW_SESSION: ('POST', '/session'), Command.GET_ALL_SESSIONS: ('GET', '/sessions'), Command.QUIT: ('DELETE', '/session/$sessionId'), Command.GET_CURRENT_WINDOW_HANDLE: ('GET', '/session/$sessionId/window_handle'), Command.GET_WINDOW_HANDLES: ('GET', '/session/$sessionId/window_handles'), Command.GET: ('POST', '/session/$sessionId/url'), Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'), Command.GO_BACK: ('POST', '/session/$sessionId/back'), Command.REFRESH: ('POST', '/session/$sessionId/refresh'), Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'), Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'), Command.GET_TITLE: ('GET', '/session/$sessionId/title'), Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'), Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'), Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/screenshot/$id'), Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'), Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'), Command.GET_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/element/active'), Command.FIND_CHILD_ELEMENT: ('POST', '/session/$sessionId/element/$id/element'), Command.FIND_CHILD_ELEMENTS: ('POST', '/session/$sessionId/element/$id/elements'), Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'), Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'), Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'), Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'), Command.SEND_KEYS_TO_ELEMENT: ('POST', '/session/$sessionId/element/$id/value'), Command.SEND_KEYS_TO_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/keys'), Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"), Command.GET_ELEMENT_VALUE: ('GET', '/session/$sessionId/element/$id/value'), Command.GET_ELEMENT_TAG_NAME: ('GET', 
'/session/$sessionId/element/$id/name'), Command.IS_ELEMENT_SELECTED: ('GET', '/session/$sessionId/element/$id/selected'), Command.SET_ELEMENT_SELECTED: ('POST', '/session/$sessionId/element/$id/selected'), Command.IS_ELEMENT_ENABLED: ('GET', '/session/$sessionId/element/$id/enabled'), Command.IS_ELEMENT_DISPLAYED: ('GET', '/session/$sessionId/element/$id/displayed'), Command.GET_ELEMENT_LOCATION: ('GET', '/session/$sessionId/element/$id/location'), Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW: ('GET', '/session/$sessionId/element/$id/location_in_view'), Command.GET_ELEMENT_SIZE: ('GET', '/session/$sessionId/element/$id/size'), Command.GET_ELEMENT_RECT: ('GET', '/session/$sessionId/element/$id/rect'), Command.GET_ELEMENT_ATTRIBUTE: ('GET', '/session/$sessionId/element/$id/attribute/$name'), Command.ELEMENT_EQUALS: ('GET', '/session/$sessionId/element/$id/equals/$other'), Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'), Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'), Command.DELETE_ALL_COOKIES: ('DELETE', '/session/$sessionId/cookie'), Command.DELETE_COOKIE: ('DELETE', '/session/$sessionId/cookie/$name'), Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'), Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'), Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'), Command.CLOSE: ('DELETE', '/session/$sessionId/window'), Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY: ('GET', '/session/$sessionId/element/$id/css/$propertyName'), Command.IMPLICIT_WAIT: ('POST', '/session/$sessionId/timeouts/implicit_wait'), Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'), Command.SET_SCRIPT_TIMEOUT: ('POST', '/session/$sessionId/timeouts/async_script'), Command.SET_TIMEOUTS: ('POST', '/session/$sessionId/timeouts'), Command.DISMISS_ALERT: ('POST', '/session/$sessionId/dismiss_alert'), Command.ACCEPT_ALERT: ('POST', '/session/$sessionId/accept_alert'), Command.SET_ALERT_VALUE: ('POST', '/session/$sessionId/alert_text'), Command.GET_ALERT_TEXT: ('GET', '/session/$sessionId/alert_text'), Command.CLICK: ('POST', '/session/$sessionId/click'), Command.DOUBLE_CLICK: ('POST', '/session/$sessionId/doubleclick'), Command.MOUSE_DOWN: ('POST', '/session/$sessionId/buttondown'), Command.MOUSE_UP: ('POST', '/session/$sessionId/buttonup'), Command.MOVE_TO: ('POST', '/session/$sessionId/moveto'), Command.GET_WINDOW_SIZE: ('GET', '/session/$sessionId/window/$windowHandle/size'), Command.SET_WINDOW_SIZE: ('POST', '/session/$sessionId/window/$windowHandle/size'), Command.GET_WINDOW_POSITION: ('GET', '/session/$sessionId/window/$windowHandle/position'), Command.SET_WINDOW_POSITION: ('POST', '/session/$sessionId/window/$windowHandle/position'), Command.MAXIMIZE_WINDOW: ('POST', '/session/$sessionId/window/$windowHandle/maximize'), Command.SET_SCREEN_ORIENTATION: ('POST', '/session/$sessionId/orientation'), Command.GET_SCREEN_ORIENTATION: ('GET', '/session/$sessionId/orientation'), Command.SINGLE_TAP: ('POST', '/session/$sessionId/touch/click'), Command.TOUCH_DOWN: ('POST', '/session/$sessionId/touch/down'), Command.TOUCH_UP: ('POST', '/session/$sessionId/touch/up'), Command.TOUCH_MOVE: ('POST', '/session/$sessionId/touch/move'), Command.TOUCH_SCROLL: ('POST', '/session/$sessionId/touch/scroll'), Command.DOUBLE_TAP: ('POST', '/session/$sessionId/touch/doubleclick'), Command.LONG_PRESS: ('POST', '/session/$sessionId/touch/longclick'), Command.FLICK: ('POST', '/session/$sessionId/touch/flick'), Command.EXECUTE_SQL: ('POST', 
'/session/$sessionId/execute_sql'), Command.GET_LOCATION: ('GET', '/session/$sessionId/location'), Command.SET_LOCATION: ('POST', '/session/$sessionId/location'), Command.GET_APP_CACHE: ('GET', '/session/$sessionId/application_cache'), Command.GET_APP_CACHE_STATUS: ('GET', '/session/$sessionId/application_cache/status'), Command.CLEAR_APP_CACHE: ('DELETE', '/session/$sessionId/application_cache/clear'), Command.GET_NETWORK_CONNECTION: ('GET', '/session/$sessionId/network_connection'), Command.SET_NETWORK_CONNECTION: ('POST', '/session/$sessionId/network_connection'), Command.GET_LOCAL_STORAGE_ITEM: ('GET', '/session/$sessionId/local_storage/key/$key'), Command.REMOVE_LOCAL_STORAGE_ITEM: ('DELETE', '/session/$sessionId/local_storage/key/$key'), Command.GET_LOCAL_STORAGE_KEYS: ('GET', '/session/$sessionId/local_storage'), Command.SET_LOCAL_STORAGE_ITEM: ('POST', '/session/$sessionId/local_storage'), Command.CLEAR_LOCAL_STORAGE: ('DELETE', '/session/$sessionId/local_storage'), Command.GET_LOCAL_STORAGE_SIZE: ('GET', '/session/$sessionId/local_storage/size'), Command.GET_SESSION_STORAGE_ITEM: ('GET', '/session/$sessionId/session_storage/key/$key'), Command.REMOVE_SESSION_STORAGE_ITEM: ('DELETE', '/session/$sessionId/session_storage/key/$key'), Command.GET_SESSION_STORAGE_KEYS: ('GET', '/session/$sessionId/session_storage'), Command.SET_SESSION_STORAGE_ITEM: ('POST', '/session/$sessionId/session_storage'), Command.CLEAR_SESSION_STORAGE: ('DELETE', '/session/$sessionId/session_storage'), Command.GET_SESSION_STORAGE_SIZE: ('GET', '/session/$sessionId/session_storage/size'), Command.GET_LOG: ('POST', '/session/$sessionId/log'), Command.GET_AVAILABLE_LOG_TYPES: ('GET', '/session/$sessionId/log/types'), Command.CURRENT_CONTEXT_HANDLE: ('GET', '/session/$sessionId/context'), Command.CONTEXT_HANDLES: ('GET', '/session/$sessionId/contexts'), Command.SWITCH_TO_CONTEXT: ('POST', '/session/$sessionId/context'), } def execute(self, command, params): """ Send a command to the remote server. Any path subtitutions required for the URL mapped to the command should be included in the command parameters. :Args: - command - A string specifying the command to execute. - params - A dictionary of named parameters to send with the command as its JSON payload. """ command_info = self._commands[command] assert command_info is not None, 'Unrecognised command %s' % command data = utils.dump_json(params) path = string.Template(command_info[1]).substitute(params) url = '%s%s' % (self._url, path) return self._request(command_info[0], url, body=data) def _request(self, method, url, body=None): """ Send an HTTP request to the remote server. :Args: - method - A string for the HTTP method to send the request with. - url - A string for the URL to send the request to. - body - A string for request body. Ignored unless method is POST or PUT. :Returns: A dictionary with the server's parsed JSON response. 
""" LOGGER.debug('%s %s %s' % (method, url, body)) parsed_url = parse.urlparse(url) if self.keep_alive: headers = {"Connection": 'keep-alive', method: parsed_url.path, "User-Agent": "Python http auth", "Content-type": "application/json;charset=\"UTF-8\"", "Accept": "application/json"} if parsed_url.username: auth = base64.standard_b64encode('%s:%s' % (parsed_url.username, parsed_url.password)).replace('\n', '') headers["Authorization"] = "Basic %s" % auth if body and method != 'POST' and method != 'PUT': body = None try: self._conn.request(method, parsed_url.path, body, headers) resp = self._conn.getresponse() except (httplib.HTTPException, socket.error): self._conn.close() raise statuscode = resp.status else: password_manager = None if parsed_url.username: netloc = parsed_url.hostname if parsed_url.port: netloc += ":%s" % parsed_url.port cleaned_url = parse.urlunparse((parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) password_manager = url_request.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(None, "%s://%s" % (parsed_url.scheme, netloc), parsed_url.username, parsed_url.password) request = Request(cleaned_url, data=body.encode('utf-8'), method=method) else: request = Request(url, data=body.encode('utf-8'), method=method) request.add_header('Accept', 'application/json') request.add_header('Content-Type', 'application/json;charset=UTF-8') if password_manager: opener = url_request.build_opener(url_request.HTTPRedirectHandler(), HttpErrorHandler(), url_request.HTTPBasicAuthHandler(password_manager)) else: opener = url_request.build_opener(url_request.HTTPRedirectHandler(), HttpErrorHandler()) resp = opener.open(request, timeout=self._timeout) statuscode = resp.code if not hasattr(resp, 'getheader'): if hasattr(resp.headers, 'getheader'): resp.getheader = lambda x: resp.headers.getheader(x) elif hasattr(resp.headers, 'get'): resp.getheader = lambda x: resp.headers.get(x) data = resp.read() try: if 300 <= statuscode < 304: return self._request('GET', resp.getheader('location')) body = data.decode('utf-8').replace('\x00', '').strip() if 399 < statuscode < 500: return {'status': statuscode, 'value': body} content_type = [] if resp.getheader('Content-Type') is not None: content_type = resp.getheader('Content-Type').split(';') if not any([x.startswith('image/png') for x in content_type]): try: data = utils.load_json(body.strip()) except ValueError: if 199 < statuscode < 300: status = ErrorCode.SUCCESS else: status = ErrorCode.UNKNOWN_ERROR return {'status': status, 'value': body.strip()} assert type(data) is dict, ( 'Invalid server response body: %s' % body) assert 'status' in data, ( 'Invalid server response; no status: %s' % body) # Some of the drivers incorrectly return a response # with no 'value' field when they should return null. if 'value' not in data: data['value'] = None return data else: data = {'status': 0, 'value': body.strip()} return data finally: LOGGER.debug("Finished Request") resp.close()
1
12,281
The else doesn't return anything?
SeleniumHQ-selenium
java
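For context on the patch above: the pre-fix line is not valid Python, because a conditional expression requires an else branch (`x if cond else y`), so the module would fail with a SyntaxError as soon as it was imported. The corrected line returns None while the timeout is still socket._GLOBAL_DEFAULT_TIMEOUT and the configured value otherwise, which is what the reviewer's question was driving at.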
@@ -48,7 +48,7 @@ func claim(args []string) error { gasLimit = action.ClaimFromRewardingFundBaseGas + action.ClaimFromRewardingFundGasPerByte*uint64(len(payload)) } - gasPriceRau, err := gasPriceInRau() + gasPriceRau, _ := gasPriceInRau() nonce, err := nonce(sender) if err != nil { return output.NewError(0, "failed to get nonce", err)
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "github.com/spf13/cobra" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/ioctl/output" "github.com/iotexproject/iotex-core/ioctl/util" ) // actionClaimCmd represents the action claim command var actionClaimCmd = &cobra.Command{ Use: "claim AMOUNT_IOTX [DATA] [-s SIGNER] [-n NONCE] [-l GAS_LIMIT] [-p GASPRICE] [-P PASSWORD] [-y]", Short: "Claim rewards from rewarding fund", Args: cobra.RangeArgs(1, 2), RunE: func(cmd *cobra.Command, args []string) error { cmd.SilenceUsage = true err := claim(args) return output.PrintError(err) }, } func init() { registerWriteCommand(actionClaimCmd) } func claim(args []string) error { amount, err := util.StringToRau(args[0], util.IotxDecimalNum) if err != nil { return output.NewError(output.ConvertError, "invalid amount", err) } payload := make([]byte, 0) if len(args) == 2 { payload = []byte(args[1]) } sender, err := signer() if err != nil { return output.NewError(output.AddressError, "failed to get signer address", err) } gasLimit := gasLimitFlag.Value().(uint64) if gasLimit == 0 { gasLimit = action.ClaimFromRewardingFundBaseGas + action.ClaimFromRewardingFundGasPerByte*uint64(len(payload)) } gasPriceRau, err := gasPriceInRau() nonce, err := nonce(sender) if err != nil { return output.NewError(0, "failed to get nonce", err) } act := (&action.ClaimFromRewardingFundBuilder{}).SetAmount(amount).SetData(payload).Build() return SendAction((&action.EnvelopeBuilder{}).SetNonce(nonce). SetGasPrice(gasPriceRau). SetGasLimit(gasLimit). SetAction(&act).Build(), sender, ) }
1
19,325
assignments should only be cuddled with other assignments (from `wsl`)
iotexproject-iotex-core
go
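The `wsl` message quoted above is usually resolved by giving each assignment its own error check, with a blank line between the groups. A sketch of that shape for the fragment in the patch; unlike the patch it keeps the gas-price error instead of discarding it with `_`, which is an assumption about the intended behaviour:

	gasPriceRau, err := gasPriceInRau()
	if err != nil {
		return output.NewError(0, "failed to get gas price", err)
	}

	nonce, err := nonce(sender)
	if err != nil {
		return output.NewError(0, "failed to get nonce", err)
	}

(identifiers as in the claim function shown above; not a self-contained program)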
@@ -3413,9 +3413,8 @@ static void GetLibrarySearchPaths(std::vector<std::string> &paths, const swift::SearchPathOptions &search_path_opts) { paths.clear(); - paths.resize(search_path_opts.LibrarySearchPaths.size() + 1); - std::copy(search_path_opts.LibrarySearchPaths.begin(), - search_path_opts.LibrarySearchPaths.end(), paths.begin()); + paths.assign(search_path_opts.LibrarySearchPaths.begin(), + search_path_opts.LibrarySearchPaths.end()); paths.push_back(search_path_opts.RuntimeLibraryPath); }
1
//===-- SwiftASTContext.cpp -------------------------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "lldb/Symbol/SwiftASTContext.h" // C++ Includes #include <mutex> // std::once #include <queue> #include <set> #include <sstream> #include "swift/ABI/MetadataValues.h" #include "swift/AST/ASTContext.h" #include "swift/AST/ASTMangler.h" #include "swift/AST/DebuggerClient.h" #include "swift/AST/Decl.h" #include "swift/AST/DiagnosticEngine.h" #include "swift/AST/ExistentialLayout.h" #include "swift/AST/GenericSignature.h" #include "swift/AST/IRGenOptions.h" #include "swift/AST/NameLookup.h" #include "swift/AST/SearchPathOptions.h" #include "swift/AST/SubstitutionMap.h" #include "swift/AST/Type.h" #include "swift/AST/Types.h" #include "swift/ASTSectionImporter/ASTSectionImporter.h" #include "swift/Basic/Dwarf.h" #include "swift/Basic/LangOptions.h" #include "swift/Basic/Platform.h" #include "swift/Basic/PrimarySpecificPaths.h" #include "swift/Basic/SourceManager.h" #include "swift/ClangImporter/ClangImporter.h" #include "swift/ClangImporter/ClangImporterOptions.h" #include "swift/Demangling/Demangle.h" #include "swift/Driver/Util.h" #include "swift/Frontend/Frontend.h" #include "swift/Frontend/PrintingDiagnosticConsumer.h" #include "swift/IDE/Utils.h" #include "swift/IRGen/Linking.h" #include "swift/SIL/SILModule.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/TargetOptions.h" #include "clang/Driver/Driver.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Process.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/ThreadPool.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "swift/../../lib/IRGen/FixedTypeInfo.h" #include "swift/../../lib/IRGen/GenEnum.h" #include "swift/../../lib/IRGen/GenHeap.h" #include "swift/../../lib/IRGen/IRGenModule.h" #include "swift/../../lib/IRGen/TypeInfo.h" #include "swift/Serialization/SerializedModuleLoader.h" #include "swift/Strings.h" #include "Plugins/ExpressionParser/Swift/SwiftDiagnostic.h" #include "Plugins/ExpressionParser/Swift/SwiftUserExpression.h" #include "lldb/Core/Debugger.h" #include "lldb/Core/DumpDataExtractor.h" #include "lldb/Core/Module.h" #include "lldb/Core/ModuleSpec.h" #include "lldb/Core/PluginManager.h" #include "lldb/Core/Section.h" #include "lldb/Core/StreamFile.h" #include "lldb/Core/ThreadSafeDenseMap.h" #include "lldb/Expression/DiagnosticManager.h" #include "lldb/Host/Host.h" #include "lldb/Host/HostInfo.h" #include "lldb/Host/StringConvert.h" #include "lldb/Symbol/ClangASTContext.h" #include "lldb/Symbol/CompileUnit.h" #include "lldb/Symbol/ObjectFile.h" #include "lldb/Symbol/SymbolFile.h" #include "lldb/Symbol/SymbolVendor.h" #include "lldb/Target/Platform.h" #include "lldb/Target/Process.h" #include 
"lldb/Target/SwiftLanguageRuntime.h" #include "lldb/Target/Target.h" #include "lldb/Utility/ArchSpec.h" #include "lldb/Utility/CleanUp.h" #include "lldb/Utility/FileSpec.h" #include "lldb/Utility/LLDBAssert.h" #include "lldb/Utility/Log.h" #include "lldb/Utility/Status.h" #include "Plugins/Platform/MacOSX/PlatformDarwin.h" #include "Plugins/SymbolFile/DWARF/DWARFASTParserSwift.h" #define VALID_OR_RETURN(value) \ do { \ if (HasFatalErrors()) { \ return (value); \ } \ } while (0) #define VALID_OR_RETURN_VOID() \ do { \ if (HasFatalErrors()) { \ return; \ } \ } while (0) using namespace lldb; using namespace lldb_private; typedef lldb_private::ThreadSafeDenseMap<swift::ASTContext *, SwiftASTContext *> ThreadSafeSwiftASTMap; static ThreadSafeSwiftASTMap &GetASTMap() { // The global destructor list will tear down all of the modules when the LLDB // shared library is being unloaded and this needs to live beyond all of those // and not be destructed before they have all gone away. So we will leak this // list intentionally so we can avoid global destructor problems. static ThreadSafeSwiftASTMap *g_map_ptr = NULL; static std::once_flag g_once_flag; std::call_once(g_once_flag, []() { g_map_ptr = new ThreadSafeSwiftASTMap(); // NOTE: Intentional leak }); return *g_map_ptr; } static inline swift::Type GetSwiftType(void *opaque_ptr) { return swift::Type((swift::TypeBase *)opaque_ptr); } static inline swift::CanType GetCanonicalSwiftType(void *opaque_ptr) { return ((swift::TypeBase *)opaque_ptr)->getCanonicalType(); } static inline swift::Type GetSwiftType(CompilerType type) { return swift::Type((swift::TypeBase *)type.GetOpaqueQualType()); } static inline swift::CanType GetCanonicalSwiftType(CompilerType type) { return ((swift::TypeBase *)type.GetOpaqueQualType())->getCanonicalType(); } struct EnumElementInfo { CompilerType clang_type; lldb_private::ConstString name; uint64_t byte_size; uint32_t value; // The value for this enumeration element uint32_t extra_value; // If not UINT32_MAX, then this value is an extra value // that appears at offset 0 to tell one or more empty // enums apart. 
This value will only be filled in if there // are one ore more enum elements that have a non-zero byte size EnumElementInfo() : clang_type(), name(), byte_size(0), extra_value(UINT32_MAX) {} void Dump(Stream &strm) const { strm.Printf("<%2" PRIu64 "> %4u", byte_size, value); if (extra_value != UINT32_MAX) strm.Printf("%4u: ", extra_value); else strm.Printf(" : "); strm.Printf("case %s", name.GetCString()); if (clang_type) strm.Printf("%s", clang_type.GetTypeName().AsCString("<no type name>")); strm.EOL(); } }; class SwiftEnumDescriptor; typedef std::shared_ptr<SwiftEnumDescriptor> SwiftEnumDescriptorSP; typedef llvm::DenseMap<lldb::opaque_compiler_type_t, SwiftEnumDescriptorSP> EnumInfoCache; typedef std::shared_ptr<EnumInfoCache> EnumInfoCacheSP; typedef llvm::DenseMap<const swift::ASTContext *, EnumInfoCacheSP> ASTEnumInfoCacheMap; static EnumInfoCache *GetEnumInfoCache(const swift::ASTContext *a) { static ASTEnumInfoCacheMap g_cache; static std::mutex g_mutex; std::lock_guard<std::mutex> locker(g_mutex); ASTEnumInfoCacheMap::iterator pos = g_cache.find(a); if (pos == g_cache.end()) { g_cache.insert( std::make_pair(a, std::shared_ptr<EnumInfoCache>(new EnumInfoCache()))); return g_cache.find(a)->second.get(); } return pos->second.get(); } namespace { bool IsDirectory(const FileSpec &spec) { return llvm::sys::fs::is_directory(spec.GetPath()); } bool IsRegularFile(const FileSpec &spec) { return llvm::sys::fs::is_regular_file(spec.GetPath()); } } llvm::LLVMContext &SwiftASTContext::GetGlobalLLVMContext() { // TODO check with Sean. Do we really want this to be static across // an LLDB managing multiple Swift processes? static llvm::LLVMContext s_global_context; return s_global_context; } llvm::ArrayRef<swift::VarDecl *> SwiftASTContext::GetStoredProperties( swift::NominalTypeDecl *nominal) { VALID_OR_RETURN(llvm::ArrayRef<swift::VarDecl *>()); // Check whether we already have the stored properties for this // nominal type. auto known = m_stored_properties.find(nominal); if (known != m_stored_properties.end()) return known->second; // Collect the stored properties from the AST and put them in the // cache. 
auto stored_properties = nominal->getStoredProperties(); auto &stored = m_stored_properties[nominal]; stored = std::vector<swift::VarDecl *>(stored_properties.begin(), stored_properties.end()); return stored; } class SwiftEnumDescriptor { public: enum class Kind { Empty, // no cases in this enum CStyle, // no cases have payloads AllPayload, // all cases have payloads Mixed // some cases have payloads }; struct ElementInfo { lldb_private::ConstString name; CompilerType payload_type; bool has_payload : 1; bool is_indirect : 1; }; Kind GetKind() const { return m_kind; } ConstString GetTypeName() { return m_type_name; } virtual ElementInfo * GetElementFromData(const lldb_private::DataExtractor &data) = 0; virtual size_t GetNumElements() { return GetNumElementsWithPayload() + GetNumCStyleElements(); } virtual size_t GetNumElementsWithPayload() = 0; virtual size_t GetNumCStyleElements() = 0; virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) = 0; virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) = 0; virtual ~SwiftEnumDescriptor() = default; static SwiftEnumDescriptor *CreateDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl); protected: SwiftEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl, SwiftEnumDescriptor::Kind k) : m_kind(k), m_type_name() { if (swift_can_type.getPointer()) { if (auto nominal = swift_can_type->getAnyNominal()) { swift::Identifier name(nominal->getName()); if (name.get()) m_type_name.SetCString(name.get()); } } } private: Kind m_kind; ConstString m_type_name; }; class SwiftEmptyEnumDescriptor : public SwiftEnumDescriptor { public: SwiftEmptyEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) : SwiftEnumDescriptor(ast, swift_can_type, enum_decl, SwiftEnumDescriptor::Kind::Empty) {} virtual ElementInfo * GetElementFromData(const lldb_private::DataExtractor &data) { return nullptr; } virtual size_t GetNumElementsWithPayload() { return 0; } virtual size_t GetNumCStyleElements() { return 0; } virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) { return nullptr; } virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) { return nullptr; } static bool classof(const SwiftEnumDescriptor *S) { return S->GetKind() == SwiftEnumDescriptor::Kind::Empty; } virtual ~SwiftEmptyEnumDescriptor() = default; }; namespace std { template <> struct less<swift::ClusteredBitVector> { bool operator()(const swift::ClusteredBitVector &lhs, const swift::ClusteredBitVector &rhs) const { int iL = lhs.size() - 1; int iR = rhs.size() - 1; for (; iL >= 0 && iR >= 0; --iL, --iR) { bool bL = lhs[iL]; bool bR = rhs[iR]; if (bL and not bR) return false; if (bR and not bL) return true; } return false; } }; } static std::string Dump(const swift::ClusteredBitVector &bit_vector) { std::string buffer; llvm::raw_string_ostream ostream(buffer); for (size_t i = 0; i < bit_vector.size(); i++) { if (bit_vector[i]) ostream << '1'; else ostream << '0'; if ((i % 4) == 3) ostream << ' '; } ostream.flush(); return buffer; } class SwiftCStyleEnumDescriptor : public SwiftEnumDescriptor { public: SwiftCStyleEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) : SwiftEnumDescriptor(ast, swift_can_type, enum_decl, SwiftEnumDescriptor::Kind::CStyle), m_nopayload_elems_bitmask(), m_elements(), m_element_indexes() { Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("doing 
C-style enum layout for %s", GetTypeName().AsCString()); SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast); swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule(); const swift::irgen::EnumImplStrategy &enum_impl_strategy = swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type); llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element> elements_with_no_payload = enum_impl_strategy.getElementsWithNoPayload(); const bool has_payload = false; const bool is_indirect = false; uint64_t case_counter = 0; m_nopayload_elems_bitmask = enum_impl_strategy.getBitMaskForNoPayloadElements(); if (log) log->Printf("m_nopayload_elems_bitmask = %s", Dump(m_nopayload_elems_bitmask).c_str()); for (auto enum_case : elements_with_no_payload) { ConstString case_name(enum_case.decl->getName().str().data()); swift::ClusteredBitVector case_value = enum_impl_strategy.getBitPatternForNoPayloadElement(enum_case.decl); if (log) log->Printf("case_name = %s, unmasked value = %s", case_name.AsCString(), Dump(case_value).c_str()); case_value &= m_nopayload_elems_bitmask; if (log) log->Printf("case_name = %s, masked value = %s", case_name.AsCString(), Dump(case_value).c_str()); std::unique_ptr<ElementInfo> elem_info( new ElementInfo{case_name, CompilerType(), has_payload, is_indirect}); m_element_indexes.emplace(case_counter, elem_info.get()); case_counter++; m_elements.emplace(case_value, std::move(elem_info)); } } virtual ElementInfo * GetElementFromData(const lldb_private::DataExtractor &data) { Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf( "C-style enum - inspecting data to find enum case for type %s", GetTypeName().AsCString()); swift::ClusteredBitVector current_payload; lldb::offset_t offset = 0; for (size_t idx = 0; idx < data.GetByteSize(); idx++) { uint64_t byte = data.GetU8(&offset); current_payload.add(8, byte); } if (log) { log->Printf("m_nopayload_elems_bitmask = %s", Dump(m_nopayload_elems_bitmask).c_str()); log->Printf("current_payload = %s", Dump(current_payload).c_str()); } if (current_payload.size() != m_nopayload_elems_bitmask.size()) { if (log) log->Printf("sizes don't match; getting out with an error"); return nullptr; } current_payload &= m_nopayload_elems_bitmask; if (log) log->Printf("masked current_payload = %s", Dump(current_payload).c_str()); auto iter = m_elements.find(current_payload), end = m_elements.end(); if (iter == end) { if (log) log->Printf("bitmask search failed"); return nullptr; } if (log) log->Printf("bitmask search success - found case %s", iter->second.get()->name.AsCString()); return iter->second.get(); } virtual size_t GetNumElementsWithPayload() { return 0; } virtual size_t GetNumCStyleElements() { return m_elements.size(); } virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) { return nullptr; } virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) { if (idx >= m_element_indexes.size()) return nullptr; return m_element_indexes[idx]; } static bool classof(const SwiftEnumDescriptor *S) { return S->GetKind() == SwiftEnumDescriptor::Kind::CStyle; } virtual ~SwiftCStyleEnumDescriptor() = default; private: swift::ClusteredBitVector m_nopayload_elems_bitmask; std::map<swift::ClusteredBitVector, std::unique_ptr<ElementInfo>> m_elements; std::map<uint64_t, ElementInfo *> m_element_indexes; }; static CompilerType GetFunctionArgumentTuple(const CompilerType &compiler_type) { if (compiler_type.IsValid() && llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) { 
swift::CanType swift_can_type( GetCanonicalSwiftType(compiler_type.GetOpaqueQualType())); auto func = swift::dyn_cast_or_null<swift::AnyFunctionType>( swift_can_type); if (func) { auto input = func.getInput(); // See comment in swift::AnyFunctionType for rationale here: // A function can take either a tuple or a parentype, but if a parentype // (i.e. (Foo)), then it will be reduced down to just Foo, so if the input // is not a tuple, that must mean there is only 1 input. auto tuple = swift::dyn_cast<swift::TupleType>(input); if (tuple) return CompilerType(compiler_type.GetTypeSystem(), tuple); else return CompilerType(compiler_type.GetTypeSystem(), input.getPointer()); } } return CompilerType(); } class SwiftAllPayloadEnumDescriptor : public SwiftEnumDescriptor { public: SwiftAllPayloadEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) : SwiftEnumDescriptor(ast, swift_can_type, enum_decl, SwiftEnumDescriptor::Kind::AllPayload), m_tag_bits(), m_elements() { Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("doing ADT-style enum layout for %s", GetTypeName().AsCString()); SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast); swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule(); const swift::irgen::EnumImplStrategy &enum_impl_strategy = swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type); llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element> elements_with_payload = enum_impl_strategy.getElementsWithPayload(); m_tag_bits = enum_impl_strategy.getTagBitsForPayloads(); if (log) log->Printf("tag_bits = %s", Dump(m_tag_bits).c_str()); auto module_ctx = enum_decl->getModuleContext(); const bool has_payload = true; for (auto enum_case : elements_with_payload) { ConstString case_name(enum_case.decl->getName().str().data()); swift::EnumElementDecl *case_decl = enum_case.decl; assert(case_decl); CompilerType case_type( ast, swift_can_type->getTypeOfMember(module_ctx, case_decl, nullptr) .getPointer()); case_type = GetFunctionArgumentTuple(case_type.GetFunctionReturnType()); const bool is_indirect = case_decl->isIndirect() || case_decl->getParentEnum()->isIndirect(); if (log) log->Printf("case_name = %s, type = %s, is_indirect = %s", case_name.AsCString(), case_type.GetTypeName().AsCString(), is_indirect ? "yes" : "no"); std::unique_ptr<ElementInfo> elem_info( new ElementInfo{case_name, case_type, has_payload, is_indirect}); m_elements.push_back(std::move(elem_info)); } } virtual ElementInfo * GetElementFromData(const lldb_private::DataExtractor &data) { Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf( "ADT-style enum - inspecting data to find enum case for type %s", GetTypeName().AsCString()); if (m_elements.size() == 0) // no elements, just fail { if (log) log->Printf("enum with no cases. getting out"); return nullptr; } if (m_elements.size() == 1) // one element, so it's gotta be it { if (log) log->Printf("enum with one case. 
getting out easy with %s", m_elements.front().get()->name.AsCString()); return m_elements.front().get(); } swift::ClusteredBitVector current_payload; lldb::offset_t offset = 0; for (size_t idx = 0; idx < data.GetByteSize(); idx++) { uint64_t byte = data.GetU8(&offset); current_payload.add(8, byte); } if (log) { log->Printf("tag_bits = %s", Dump(m_tag_bits).c_str()); log->Printf("current_payload = %s", Dump(current_payload).c_str()); } if (current_payload.size() != m_tag_bits.size()) { if (log) log->Printf("sizes don't match; getting out with an error"); return nullptr; } size_t discriminator = 0; size_t power_of_2 = 1; auto enumerator = m_tag_bits.enumerateSetBits(); for (llvm::Optional<size_t> next = enumerator.findNext(); next.hasValue(); next = enumerator.findNext()) { discriminator = discriminator + (current_payload[next.getValue()] ? power_of_2 : 0); power_of_2 <<= 1; } if (discriminator >= m_elements.size()) // discriminator too large, get out { if (log) log->Printf("discriminator value of %" PRIu64 " too large, getting out", (uint64_t)discriminator); return nullptr; } else { auto ptr = m_elements[discriminator].get(); if (log) { if (!ptr) log->Printf("discriminator value of %" PRIu64 " acceptable, but null case matched - that's bad", (uint64_t)discriminator); else log->Printf("discriminator value of %" PRIu64 " acceptable, case %s matched", (uint64_t)discriminator, ptr->name.AsCString()); } return ptr; } } virtual size_t GetNumElementsWithPayload() { return m_elements.size(); } virtual size_t GetNumCStyleElements() { return 0; } virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) { if (idx >= m_elements.size()) return nullptr; return m_elements[idx].get(); } virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) { return nullptr; } static bool classof(const SwiftEnumDescriptor *S) { return S->GetKind() == SwiftEnumDescriptor::Kind::AllPayload; } virtual ~SwiftAllPayloadEnumDescriptor() = default; private: swift::ClusteredBitVector m_tag_bits; std::vector<std::unique_ptr<ElementInfo>> m_elements; }; class SwiftMixedEnumDescriptor : public SwiftEnumDescriptor { public: SwiftMixedEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) : SwiftEnumDescriptor(ast, swift_can_type, enum_decl, SwiftEnumDescriptor::Kind::Mixed), m_non_payload_cases(ast, swift_can_type, enum_decl), m_payload_cases(ast, swift_can_type, enum_decl) {} virtual ElementInfo * GetElementFromData(const lldb_private::DataExtractor &data) { ElementInfo *elem_info = m_non_payload_cases.GetElementFromData(data); return elem_info ? 
elem_info : m_payload_cases.GetElementFromData(data); } static bool classof(const SwiftEnumDescriptor *S) { return S->GetKind() == SwiftEnumDescriptor::Kind::Mixed; } virtual size_t GetNumElementsWithPayload() { return m_payload_cases.GetNumElementsWithPayload(); } virtual size_t GetNumCStyleElements() { return m_non_payload_cases.GetNumCStyleElements(); } virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) { return m_payload_cases.GetElementWithPayloadAtIndex(idx); } virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) { return m_non_payload_cases.GetElementWithNoPayloadAtIndex(idx); } virtual ~SwiftMixedEnumDescriptor() = default; private: SwiftCStyleEnumDescriptor m_non_payload_cases; SwiftAllPayloadEnumDescriptor m_payload_cases; }; SwiftEnumDescriptor * SwiftEnumDescriptor::CreateDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) { assert(ast); assert(enum_decl); assert(swift_can_type.getPointer()); SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast); assert(swift_ast_ctx); swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule(); const swift::irgen::EnumImplStrategy &enum_impl_strategy = swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type); llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element> elements_with_payload = enum_impl_strategy.getElementsWithPayload(); llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element> elements_with_no_payload = enum_impl_strategy.getElementsWithNoPayload(); if (elements_with_no_payload.size() == 0) { // nothing with no payload.. empty or all payloads? if (elements_with_payload.size() == 0) return new SwiftEmptyEnumDescriptor(ast, swift_can_type, enum_decl); else return new SwiftAllPayloadEnumDescriptor(ast, swift_can_type, enum_decl); } else { // something with no payload.. mixed or C-style? if (elements_with_payload.size() == 0) return new SwiftCStyleEnumDescriptor(ast, swift_can_type, enum_decl); else return new SwiftMixedEnumDescriptor(ast, swift_can_type, enum_decl); } } static SwiftEnumDescriptor * GetEnumInfoFromEnumDecl(swift::ASTContext *ast, swift::CanType swift_can_type, swift::EnumDecl *enum_decl) { return SwiftEnumDescriptor::CreateDescriptor(ast, swift_can_type, enum_decl); } SwiftEnumDescriptor *SwiftASTContext::GetCachedEnumInfo(void *type) { VALID_OR_RETURN(nullptr); if (type) { EnumInfoCache *enum_info_cache = GetEnumInfoCache(GetASTContext()); EnumInfoCache::const_iterator pos = enum_info_cache->find(type); if (pos != enum_info_cache->end()) return pos->second.get(); swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (!SwiftASTContext::IsFullyRealized( CompilerType(GetASTContext(), swift_can_type))) return nullptr; SwiftEnumDescriptorSP enum_info_sp; if (auto *enum_type = swift_can_type->getAs<swift::EnumType>()) { enum_info_sp.reset(GetEnumInfoFromEnumDecl( GetASTContext(), swift_can_type, enum_type->getDecl())); } else if (auto *bound_enum_type = swift_can_type->getAs<swift::BoundGenericEnumType>()) { enum_info_sp.reset(GetEnumInfoFromEnumDecl( GetASTContext(), swift_can_type, bound_enum_type->getDecl())); } if (enum_info_sp.get()) enum_info_cache->insert(std::make_pair(type, enum_info_sp)); return enum_info_sp.get(); } return nullptr; } namespace { static inline bool SwiftASTContextSupportsLanguage(lldb::LanguageType language) { return language == eLanguageTypeSwift; } static bool IsDeviceSupport(const char *path) { // The old-style check, which we preserve for safety. 
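// Such paths typically look like
// ~/Library/Developer/Xcode/iOS DeviceSupport/<version>/Symbols.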
if (path && strstr(path, "iOS DeviceSupport")) return true; // The new-style check, which should cover more devices. if (path) if (const char *Developer_Xcode = strstr(path, "Developer")) if (const char *DeviceSupport = strstr(Developer_Xcode, "DeviceSupport")) if (strstr(DeviceSupport, "Symbols")) return true; // Don't look in the simulator runtime frameworks either. They either // duplicate what the SDK has, or for older simulators conflict with them. if (path && strstr(path, ".simruntime/Contents/Resources/")) return true; return false; } } SwiftASTContext::SwiftASTContext(const char *triple, Target *target) : TypeSystem(TypeSystem::eKindSwift), m_source_manager_ap(), m_diagnostic_engine_ap(), m_ast_context_ap(), m_ir_gen_module_ap(), m_compiler_invocation_ap(new swift::CompilerInvocation()), m_dwarf_ast_parser_ap(), m_scratch_module(NULL), m_sil_module_ap(), m_serialized_module_loader(NULL), m_clang_importer(NULL), m_swift_module_cache(), m_mangled_name_to_type_map(), m_type_to_mangled_name_map(), m_pointer_byte_size(0), m_pointer_bit_align(0), m_void_function_type(), m_target_wp(), m_process(NULL), m_platform_sdk_path(), m_resource_dir(), m_ast_file_data_map(), m_initialized_language_options(false), m_initialized_search_path_options(false), m_initialized_clang_importer_options(false), m_reported_fatal_error(false), m_fatal_errors(), m_negative_type_cache(), m_extra_type_info_cache(), m_swift_type_map() { // Set the clang modules cache path. llvm::SmallString<128> path; auto props = ModuleList::GetGlobalModuleListProperties(); props.GetClangModulesCachePath().GetPath(path); m_compiler_invocation_ap->setClangModuleCachePath(path); if (target) m_target_wp = target->shared_from_this(); if (triple) SetTriple(triple); swift::IRGenOptions &ir_gen_opts = m_compiler_invocation_ap->getIRGenOptions(); ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module; ir_gen_opts.UseJIT = true; ir_gen_opts.DWARFVersion = swift::DWARFVersion; // FIXME: lldb does not support resilience yet. 
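// (Bypassing resilience lets IRGen treat resilient types as if their
// layouts were fixed and directly accessible, which is what the debugger
// wants when reading memory.)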
ir_gen_opts.EnableResilienceBypass = true;
}

SwiftASTContext::SwiftASTContext(const SwiftASTContext &rhs)
    : TypeSystem(rhs.getKind()), m_source_manager_ap(),
      m_diagnostic_engine_ap(), m_ast_context_ap(), m_ir_gen_module_ap(),
      m_compiler_invocation_ap(new swift::CompilerInvocation()),
      m_dwarf_ast_parser_ap(), m_scratch_module(NULL), m_sil_module_ap(),
      m_serialized_module_loader(NULL), m_clang_importer(NULL),
      m_swift_module_cache(), m_mangled_name_to_type_map(),
      m_type_to_mangled_name_map(), m_pointer_byte_size(0),
      m_pointer_bit_align(0), m_void_function_type(), m_target_wp(),
      m_process(NULL), m_platform_sdk_path(), m_resource_dir(),
      m_ast_file_data_map(), m_initialized_language_options(false),
      m_initialized_search_path_options(false),
      m_initialized_clang_importer_options(false),
      m_reported_fatal_error(false), m_fatal_errors(), m_negative_type_cache(),
      m_extra_type_info_cache(), m_swift_type_map() {
  if (rhs.m_compiler_invocation_ap) {
    std::string rhs_triple = rhs.GetTriple();
    if (!rhs_triple.empty()) {
      SetTriple(rhs_triple.c_str());
    }
    llvm::StringRef module_cache_path =
        rhs.m_compiler_invocation_ap->getClangModuleCachePath();
    m_compiler_invocation_ap->setClangModuleCachePath(module_cache_path);
  }

  swift::IRGenOptions &ir_gen_opts =
      m_compiler_invocation_ap->getIRGenOptions();
  ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module;
  ir_gen_opts.UseJIT = true;

  TargetSP target_sp = rhs.m_target_wp.lock();
  if (target_sp)
    m_target_wp = target_sp;

  m_platform_sdk_path = rhs.m_platform_sdk_path;
  m_resource_dir = rhs.m_resource_dir;

  swift::ASTContext *lhs_ast = GetASTContext();
  swift::ASTContext *rhs_ast =
      const_cast<SwiftASTContext &>(rhs).GetASTContext();

  if (lhs_ast && rhs_ast) {
    lhs_ast->SearchPathOpts = rhs_ast->SearchPathOpts;
  }
  GetClangImporter();
}

SwiftASTContext::~SwiftASTContext() {
  if (swift::ASTContext *ctx = m_ast_context_ap.get()) {
    // A RemoteASTContext associated with this swift::ASTContext has to be
    // destroyed before the swift::ASTContext is destroyed.
    if (TargetSP target_sp = m_target_wp.lock())
      if (ProcessSP process_sp = target_sp->GetProcessSP())
        if (auto *runtime = process_sp->GetSwiftLanguageRuntime())
          runtime->ReleaseAssociatedRemoteASTContext(ctx);

    GetASTMap().Erase(ctx);
  }
}

ConstString SwiftASTContext::GetPluginNameStatic() {
  return ConstString("swift");
}

ConstString SwiftASTContext::GetPluginName() { return GetPluginNameStatic(); }

uint32_t SwiftASTContext::GetPluginVersion() { return 1; }

static std::string &GetDefaultResourceDir() {
  static std::string s_resource_dir;
  return s_resource_dir;
}

/// Initialize the compiler invocation with the search paths from a
/// serialized AST.
/// \returns true on success.
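/// \param name Name of the module being deserialized; used only when
/// composing the error text.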
static bool DeserializeCompilerFlags(swift::CompilerInvocation &invocation,
                                     StringRef section_data_ref,
                                     StringRef name,
                                     llvm::raw_ostream &error) {
  auto result = invocation.loadFromSerializedAST(section_data_ref);
  if (result == swift::serialization::Status::Valid)
    return true;

  error << "While deserializing " << name << ":\n";
  switch (result) {
  case swift::serialization::Status::Valid:
    llvm_unreachable("already checked");
  case swift::serialization::Status::FormatTooOld:
    error << "the swift module file format is too old to be used by the "
             "version of the swift compiler in LLDB\n";
    break;
  case swift::serialization::Status::FormatTooNew:
    error << "the swift module file format is too new to be used by this "
             "version of the swift compiler in LLDB\n";
    break;
  case swift::serialization::Status::MissingDependency:
    error << "the swift module file depends on another module that can't be "
             "loaded\n";
    break;
  case swift::serialization::Status::MissingShadowedModule:
    error << "the swift module file is an overlay for a clang module, which "
             "can't be found\n";
    break;
  case swift::serialization::Status::FailedToLoadBridgingHeader:
    error << "the swift module file depends on a bridging header that can't "
             "be loaded\n";
    break;
  case swift::serialization::Status::Malformed:
    error << "the swift module file is malformed\n";
    break;
  case swift::serialization::Status::MalformedDocumentation:
    error << "the swift module documentation file is malformed in some way\n";
    break;
  case swift::serialization::Status::NameMismatch:
    error << "the swift module file's name does not match the module it is "
             "being loaded into\n";
    break;
  case swift::serialization::Status::TargetIncompatible:
    error << "the swift module file was built for a different target "
             "platform\n";
    break;
  case swift::serialization::Status::TargetTooNew:
    error << "the swift module file was built for a target newer than the "
             "current target\n";
    break;
  }
  return false;
}

/// Retrieve the serialized AST data blobs and initialize the compiler
/// invocation with the concatenated search paths from the blobs.
/// \returns true if an error was encountered.
static bool DeserializeAllCompilerFlags(SwiftASTContext &swift_ast,
                                        Module &module,
                                        llvm::raw_ostream &error,
                                        bool &got_serialized_options) {
  got_serialized_options = false;
  auto &invocation = swift_ast.GetCompilerInvocation();
  SymbolVendor *sym_vendor = module.GetSymbolVendor();
  if (!sym_vendor)
    return false;

  auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
  if (log)
    log->Printf("Found %d AST file data entries for library: %s.",
                (int)ast_file_datas.size(),
                module.GetSpecificationDescription().c_str());

  // If no N_AST symbols exist, this is not an error.
  if (ast_file_datas.empty())
    return false;

  // An AST section consists of one or more AST modules, optionally
  // with headers. Iterate over all AST modules.
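// Each data blob may hold several serialized modules back to back;
// validateSerializedAST() reports how many bytes the next module occupies,
// so we can walk the buffer module by module.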
for (auto ast_file_data_sp : ast_file_datas) { llvm::StringRef buf((const char *)ast_file_data_sp->GetBytes(), ast_file_data_sp->GetByteSize()); while (!buf.empty()) { std::string last_sdk_path; auto info = swift::serialization::validateSerializedAST(buf); if ((info.status != swift::serialization::Status::Valid) || (info.bytes == 0) || (info.bytes > buf.size())) { if (log) log->Printf("Unable to load AST for module %s from library: %s.", info.name.str().c_str(), module.GetSpecificationDescription().c_str()); return true; } if (info.name.empty()) continue; StringRef moduleData = buf.substr(0, info.bytes); if (log) last_sdk_path = invocation.getSDKPath(); got_serialized_options |= DeserializeCompilerFlags(invocation, moduleData, info.name, error); if (log && !last_sdk_path.empty() && invocation.getSDKPath() != last_sdk_path) log->Printf("SDK path mismatch!\n" "Was \"%s\", found \"%s\" in module %s.", last_sdk_path.c_str(), invocation.getSDKPath().str().c_str(), info.name.str().c_str()); buf = buf.substr(info.bytes); } } return false; } /// Return whether this module contains any serialized Swift ASTs. bool HasSwiftModules(Module &module) { SymbolVendor *sym_vendor = module.GetSymbolVendor(); if (!sym_vendor) return false; auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift); return !ast_file_datas.empty(); } void SwiftASTContext::RemapClangImporterOptions( const PathMappingList &path_map) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); auto &options = GetClangImporterOptions(); std::string remapped; if (path_map.RemapPath(options.BridgingHeader, remapped)) { if (log) log->Printf("remapped %s -> %s", options.BridgingHeader.c_str(), remapped.c_str()); options.BridgingHeader = remapped; } for (auto &arg_string : options.ExtraArgs) { StringRef prefix; StringRef arg = arg_string; if (arg.consume_front("-I")) prefix = "-I"; if (path_map.RemapPath(arg, remapped)) { if (log) log->Printf("remapped %s -> %s%s", arg.str().c_str(), prefix.str().c_str(), remapped.c_str()); arg_string = prefix.str()+remapped; } } } lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language, Module &module, Target *target) { if (!SwiftASTContextSupportsLanguage(language)) return lldb::TypeSystemSP(); ArchSpec arch = module.GetArchitecture(); ObjectFile *objfile = module.GetObjectFile(); ArchSpec object_arch; if (!objfile || !objfile->GetArchitecture(object_arch)) return TypeSystemSP(); lldb::CompUnitSP main_compile_unit_sp = module.GetCompileUnitAtIndex(0); Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (main_compile_unit_sp && !main_compile_unit_sp->Exists()) { if (log) { StreamString ss; module.GetDescription(&ss); log->Printf("Corresponding source not found for %s, loading module " "%s is unlikely to succeed", main_compile_unit_sp->GetCString(), ss.GetData()); } } std::shared_ptr<SwiftASTContext> swift_ast_sp( target ? (new SwiftASTContextForExpressions(*target)) : new SwiftASTContext()); swift_ast_sp->GetLanguageOptions().DebuggerSupport = true; swift_ast_sp->GetLanguageOptions().EnableAccessControl = false; swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false; if (!arch.IsValid()) return TypeSystemSP(); llvm::Triple triple = arch.GetTriple(); if (triple.getOS() == llvm::Triple::UnknownOS) { // cl_kernels are the only binaries that don't have an LC_MIN_VERSION_xxx // load command. This avoids a Swift assertion. 
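// Guess an OS from the architecture: on Apple hosts, ARM flavors default
// to iOS and everything else to macOS.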
#if defined(__APPLE__) switch (triple.getArch()) { default: triple.setOS(llvm::Triple::MacOSX); break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: triple.setOS(llvm::Triple::IOS); break; } #else // Not an elegant hack on OS X, not an elegant hack elsewhere. // But we shouldn't be claiming things are Mac binaries when they are // not. triple.setOS(HostInfo::GetArchitecture().GetTriple().getOS()); #endif } swift_ast_sp->SetTriple(triple.getTriple().c_str(), &module); bool set_triple = false; SymbolVendor *sym_vendor = module.GetSymbolVendor(); std::string resource_dir; std::string target_triple; if (sym_vendor) { bool got_serialized_options; llvm::SmallString<0> error; llvm::raw_svector_ostream errs(error); if (DeserializeAllCompilerFlags(*swift_ast_sp, module, errs, got_serialized_options)) { swift_ast_sp->m_fatal_errors.SetErrorString(error.str()); return swift_ast_sp; } // Some of the bits in the compiler options we keep separately, so we // need to populate them from the serialized options: llvm::StringRef serialized_triple = swift_ast_sp->GetCompilerInvocation().getTargetTriple(); if (serialized_triple.empty()) { if (log) log->Printf("\tSerialized triple for %s was empty.", module.GetSpecificationDescription().c_str()); } else { if (log) log->Printf("\tFound serialized triple for %s: %s.", module.GetSpecificationDescription().c_str(), serialized_triple.data()); swift_ast_sp->SetTriple(serialized_triple.data(), &module); set_triple = true; } llvm::StringRef serialized_sdk_path = swift_ast_sp->GetCompilerInvocation().getSDKPath(); if (serialized_sdk_path.empty()) { if (log) log->Printf("\tNo serialized SDK path."); } else { if (log) log->Printf("\tGot serialized SDK path %s.", serialized_sdk_path.data()); FileSpec sdk_spec(serialized_sdk_path.data(), false); if (sdk_spec.Exists()) { swift_ast_sp->SetPlatformSDKPath(serialized_sdk_path.data()); } } if (!got_serialized_options || !swift_ast_sp->GetPlatformSDKPath()) { std::string platform_sdk_path; if (sym_vendor->GetCompileOption("-sdk", platform_sdk_path)) { FileSpec sdk_spec(platform_sdk_path.c_str(), false); if (sdk_spec.Exists()) { swift_ast_sp->SetPlatformSDKPath(platform_sdk_path.c_str()); } if (sym_vendor->GetCompileOption("-target", target_triple)) { llvm::StringRef parsed_triple(target_triple); swift_ast_sp->SetTriple(target_triple.c_str(), &module); set_triple = true; } } } if (sym_vendor->GetCompileOption("-resource-dir", resource_dir)) { swift_ast_sp->SetResourceDir(resource_dir.c_str()); } else if (!GetDefaultResourceDir().empty()) { // Use the first resource dir we found when setting up a target. 
swift_ast_sp->SetResourceDir(GetDefaultResourceDir().c_str());
} else {
  if (log)
    log->Printf("No resource dir available for module's SwiftASTContext.");
}

if (!got_serialized_options) {

  std::vector<std::string> framework_search_paths;

  if (sym_vendor->GetCompileOptions("-F", framework_search_paths)) {
    for (std::string &search_path : framework_search_paths) {
      swift_ast_sp->AddFrameworkSearchPath(search_path.c_str());
    }
  }

  std::vector<std::string> include_paths;

  if (sym_vendor->GetCompileOptions("-I", include_paths)) {
    for (std::string &search_path : include_paths) {
      const FileSpec path_spec(search_path.c_str(), false);

      if (path_spec.Exists()) {
        static const ConstString s_hmap_extension("hmap");

        if (IsDirectory(path_spec)) {
          swift_ast_sp->AddModuleSearchPath(search_path.c_str());
        } else if (IsRegularFile(path_spec) &&
                   path_spec.GetFileNameExtension() == s_hmap_extension) {
          std::string argument("-I");
          argument.append(search_path);
          swift_ast_sp->AddClangArgument(argument.c_str());
        }
      }
    }
  }

  std::vector<std::string> cc_options;

  if (sym_vendor->GetCompileOptions("-Xcc", cc_options)) {
    for (size_t i = 0; i < cc_options.size(); ++i) {
      if (!cc_options[i].compare("-iquote") && i + 1 < cc_options.size()) {
        swift_ast_sp->AddClangArgumentPair("-iquote",
                                           cc_options[i + 1].c_str());
      }
    }
  }
}

FileSpecList loaded_modules;

sym_vendor->GetLoadedModules(lldb::eLanguageTypeSwift, loaded_modules);

for (size_t mi = 0, me = loaded_modules.GetSize(); mi != me; ++mi) {
  const FileSpec &loaded_module = loaded_modules.GetFileSpecAtIndex(mi);

  if (loaded_module.Exists())
    swift_ast_sp->AddModuleSearchPath(
        loaded_module.GetDirectory().GetCString());
}
}

if (!set_triple) {
  llvm::Triple llvm_triple(swift_ast_sp->GetTriple());

  // LLVM wants this to be set to iOS or MacOSX; if we're working on
  // a bare-boards type image, change the triple for LLVM's benefit.
  if (llvm_triple.getVendor() == llvm::Triple::Apple &&
      llvm_triple.getOS() == llvm::Triple::UnknownOS) {
    if (llvm_triple.getArch() == llvm::Triple::arm ||
        llvm_triple.getArch() == llvm::Triple::thumb) {
      llvm_triple.setOS(llvm::Triple::IOS);
    } else {
      llvm_triple.setOS(llvm::Triple::MacOSX);
    }
    swift_ast_sp->SetTriple(llvm_triple.str().c_str(), &module);
  }
}

// Apply source path remappings found in the module's dSYM.
swift_ast_sp->RemapClangImporterOptions(module.GetSourceMappingList());

if (!swift_ast_sp->GetClangImporter()) {
  if (log) {
    log->Printf("((Module*)%p) [%s]->GetSwiftASTContext() returning NULL "
                "- couldn't create a ClangImporter",
                &module,
                module.GetFileSpec().GetFilename().AsCString("<anonymous>"));
  }
  return TypeSystemSP();
}

std::vector<std::string> module_names;
swift_ast_sp->RegisterSectionModules(module, module_names);
swift_ast_sp->ValidateSectionModules(module, module_names);

if (log) {
  log->Printf("((Module*)%p) [%s]->GetSwiftASTContext() = %p", &module,
              module.GetFileSpec().GetFilename().AsCString("<anonymous>"),
              swift_ast_sp.get());
  swift_ast_sp->DumpConfiguration(log);
}
return swift_ast_sp;
}

lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language,
                                                   Target &target,
                                                   const char *extra_options) {
  if (!SwiftASTContextSupportsLanguage(language))
    return lldb::TypeSystemSP();

  ArchSpec arch = target.GetArchitecture();

  // Make an AST but don't set the triple yet. We need to try and detect
  // if we have an iOS simulator...
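// (A simulator binary shares the host's CPU architecture, so the OS and
// environment in the triple, not the arch, are what distinguish it.)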
std::shared_ptr<SwiftASTContextForExpressions> swift_ast_sp(
    new SwiftASTContextForExpressions(target));

Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));

if (log)
  log->Printf("SwiftASTContext::CreateInstance(Target)");

if (!arch.IsValid())
  return TypeSystemSP();

swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false;

bool handled_sdk_path = false;
bool handled_resource_dir = false;
const size_t num_images = target.GetImages().GetSize();
// Set the SDK path and resource dir prior to doing search paths.
// Otherwise when we create search path options we put in the wrong SDK
// path.
FileSpec &target_sdk_spec = target.GetSDKPath();
if (target_sdk_spec && target_sdk_spec.Exists()) {
  std::string platform_sdk_path(target_sdk_spec.GetPath());
  swift_ast_sp->SetPlatformSDKPath(std::move(platform_sdk_path));
  handled_sdk_path = true;
}

if (target.GetSwiftCreateModuleContextsInParallel()) {
  // The first call to GetTypeSystemForLanguage() on a module will
  // trigger the import (and thus most likely the rebuild) of all
  // the Clang modules that were imported in this module. This can
  // be a lot of work (potentially ten seconds per module), but it
  // can be performed in parallel.
  llvm::ThreadPool pool;
  for (size_t mi = 0; mi != num_images; ++mi) {
    auto module_sp = target.GetImages().GetModuleAtIndex(mi);
    pool.async([=] {
      module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift);
    });
  }
  pool.wait();
}

Status module_error;
// Track the module UUIDs we have already warned about, so each one
// produces at most one warning below.
std::unordered_set<std::string> swift_warnings_issued;
for (size_t mi = 0; mi != num_images; ++mi) {
  ModuleSP module_sp = target.GetImages().GetModuleAtIndex(mi);

  // Skip images without a serialized Swift AST. This avoids
  // spurious warning messages.
  if (!HasSwiftModules(*module_sp))
    continue;

  SwiftASTContext *module_swift_ast = llvm::dyn_cast_or_null<SwiftASTContext>(
      module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));

  if (!module_swift_ast || module_swift_ast->HasFatalErrors() ||
      !module_swift_ast->GetClangImporter()) {
    // Make sure we warn about this module load failure; the warning that
    // comes from loading types often gets swallowed up and not seen, and
    // this is the only reliable point where we can show it. But only do
    // it once per UUID so we don't overwhelm the user with warnings...
    UUID module_uuid(module_sp->GetUUID());
    std::pair<std::unordered_set<std::string>::iterator, bool> result(
        swift_warnings_issued.insert(module_uuid.GetAsString()));
    if (result.second) {
      StreamString ss;
      module_sp->GetDescription(&ss, eDescriptionLevelBrief);
      if (module_swift_ast && module_swift_ast->HasFatalErrors())
        ss << ": "
           << module_swift_ast->GetFatalErrors().AsCString("unknown error");

      target.GetDebugger().GetErrorFile()->Printf(
          "warning: Swift error in module %s.\n"
          "Debug info from this module will be unavailable in the "
          "debugger.\n\n",
          ss.GetData());
    }

    continue;
  }

  if (!handled_sdk_path) {
    const char *platform_sdk_path = module_swift_ast->GetPlatformSDKPath();

    if (platform_sdk_path) {
      handled_sdk_path = true;
      swift_ast_sp->SetPlatformSDKPath(platform_sdk_path);
    }
  }

  if (!handled_resource_dir) {
    const char *resource_dir = module_swift_ast->GetResourceDir();

    if (resource_dir) {
      handled_resource_dir = true;
      swift_ast_sp->SetResourceDir(resource_dir);
      if (GetDefaultResourceDir().empty()) {
        // Tuck this away as a reasonable default resource dir
        // for contexts that don't have one. The Swift parser
        // will assert without one.
GetDefaultResourceDir() = resource_dir; } } } if (handled_sdk_path && handled_resource_dir) break; } // First, prime the compiler with the options from the main executable: bool got_serialized_options = false; ModuleSP exe_module_sp(target.GetExecutableModule()); // If we're debugging a testsuite, then treat the main test bundle as the // executable. if (exe_module_sp && PlatformDarwin::IsUnitTestExecutable(*exe_module_sp)) { ModuleSP unit_test_module = PlatformDarwin::GetUnitTestModule(target.GetImages()); if (unit_test_module) { exe_module_sp = unit_test_module; } } // Attempt to deserialize the compiler flags from the AST. if (exe_module_sp) { llvm::SmallString<0> error; llvm::raw_svector_ostream errs(error); bool failed = DeserializeAllCompilerFlags(*swift_ast_sp, *exe_module_sp, errs, got_serialized_options); if (log && failed) log->Printf( "Attempt to load compiler options from serialized AST failed: %s", error.c_str()); } // Now if the user fully specified the triple, let that override the one // we got from executable's options: if (target.GetArchitecture().IsFullySpecifiedTriple()) { swift_ast_sp->SetTriple( target.GetArchitecture().GetTriple().str().c_str()); } else { // Always run using the Host OS triple... bool set_triple = false; PlatformSP platform_sp(target.GetPlatform()); uint32_t major, minor, update; if (platform_sp && !target.GetArchitecture().GetTriple().hasEnvironment() && platform_sp->GetOSVersion(major, minor, update, target.GetProcessSP().get())) { StreamString full_triple_name; full_triple_name.PutCString(target.GetArchitecture().GetTriple().str()); if (major != UINT32_MAX) { full_triple_name.Printf("%u", major); if (minor != UINT32_MAX) { full_triple_name.Printf(".%u", minor); if (update != UINT32_MAX) full_triple_name.Printf(".%u", update); } } swift_ast_sp->SetTriple(full_triple_name.GetString().data()); set_triple = true; } if (!set_triple) { ModuleSP exe_module_sp(target.GetExecutableModule()); if (exe_module_sp) { Status exe_error; SwiftASTContext *exe_swift_ctx = llvm::dyn_cast_or_null<SwiftASTContext>( exe_module_sp->GetTypeSystemForLanguage( lldb::eLanguageTypeSwift)); if (exe_swift_ctx) { swift_ast_sp->SetTriple( exe_swift_ctx->GetLanguageOptions().Target.str().c_str()); } } } } const bool use_all_compiler_flags = !got_serialized_options || target.GetUseAllCompilerFlags(); std::function<void(ModuleSP &&)> process_one_module = [&target, &swift_ast_sp, use_all_compiler_flags](ModuleSP &&module_sp) { const FileSpec &module_file = module_sp->GetFileSpec(); std::string module_path = module_file.GetPath(); // Add the containing framework to the framework search path. Don't // do that if this is the executable module, since it might be // buried in some framework that we don't care about. if (use_all_compiler_flags && target.GetExecutableModulePointer() != module_sp.get()) { size_t framework_offset = module_path.rfind(".framework/"); if (framework_offset != std::string::npos) { // Sometimes the version of the framework that got loaded has been // stripped and in that case, adding it to the framework search // path will just short-cut a clang search that might otherwise // find the needed headers. So don't add these paths. 
std::string framework_path = module_path.substr(0, framework_offset); framework_path.append(".framework"); FileSpec path_spec(framework_path, true); FileSpec headers_spec = path_spec.CopyByAppendingPathComponent("Headers"); bool add_it = false; if (headers_spec.Exists()) add_it = true; if (!add_it) { FileSpec module_spec = path_spec.CopyByAppendingPathComponent("Modules"); if (module_spec.Exists()) add_it = true; } if (!add_it) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("process_one_module rejecting framework path" " \"%s\" as it has no Headers " "or Modules subdirectories.", framework_path.c_str()); } if (add_it) { while (framework_offset && (module_path[framework_offset] != '/')) framework_offset--; if (module_path[framework_offset] == '/') { // framework_offset now points to the '/'; std::string parent_path = module_path.substr(0, framework_offset); if (strncmp(parent_path.c_str(), "/System/Library", strlen("/System/Library")) && !IsDeviceSupport(parent_path.c_str())) { swift_ast_sp->AddFrameworkSearchPath(parent_path.c_str()); } } } } } // Skip images without a serialized Swift AST. if (!HasSwiftModules(*module_sp)) return; SymbolVendor *sym_vendor = module_sp->GetSymbolVendor(); if (!sym_vendor) return; std::vector<std::string> module_names; SymbolFile *sym_file = sym_vendor->GetSymbolFile(); if (!sym_file) return; Status sym_file_error; SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>( sym_file->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift)); if (ast_context && !ast_context->HasErrors()) { if (use_all_compiler_flags || target.GetExecutableModulePointer() == module_sp.get()) { for (size_t msi = 0, mse = ast_context->GetNumModuleSearchPaths(); msi < mse; ++msi) { const char *search_path = ast_context->GetModuleSearchPathAtIndex(msi); swift_ast_sp->AddModuleSearchPath(search_path); } for (size_t fsi = 0, fse = ast_context->GetNumFrameworkSearchPaths(); fsi < fse; ++fsi) { const char *search_path = ast_context->GetFrameworkSearchPathAtIndex(fsi); swift_ast_sp->AddFrameworkSearchPath(search_path); } std::string clang_argument; for (size_t osi = 0, ose = ast_context->GetNumClangArguments(); osi < ose; ++osi) { // Join multi-arg -D and -U options for uniquing. clang_argument += ast_context->GetClangArgumentAtIndex(osi); if (clang_argument == "-D" || clang_argument == "-U") continue; // Enable uniquing for -D and -U options. bool force = true; if (clang_argument.size() >= 2 && clang_argument[0] == '-' && (clang_argument[1] == 'D' || clang_argument[1] == 'U')) force = false; swift_ast_sp->AddClangArgument(clang_argument, force); clang_argument.clear(); } } swift_ast_sp->RegisterSectionModules(*module_sp, module_names); } }; for (size_t mi = 0; mi != num_images; ++mi) { process_one_module(target.GetImages().GetModuleAtIndex(mi)); } FileSpecList &framework_search_paths = target.GetSwiftFrameworkSearchPaths(); FileSpecList &module_search_paths = target.GetSwiftModuleSearchPaths(); for (size_t fi = 0, fe = framework_search_paths.GetSize(); fi != fe; ++fi) { swift_ast_sp->AddFrameworkSearchPath( framework_search_paths.GetFileSpecAtIndex(fi).GetPath().c_str()); } for (size_t mi = 0, me = module_search_paths.GetSize(); mi != me; ++mi) { swift_ast_sp->AddModuleSearchPath( module_search_paths.GetFileSpecAtIndex(mi).GetPath().c_str()); } // Now fold any extra options we were passed. This has to be done BEFORE // the ClangImporter is made by calling GetClangImporter or these options // will be ignored. 
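// parseArgs() pushes the raw argv through the Swift frontend's own option
// parser, with any diagnostics routed to our DiagnosticEngine.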
if (extra_options) {
  swift::CompilerInvocation &compiler_invocation =
      swift_ast_sp->GetCompilerInvocation();
  Args extra_args(extra_options);
  llvm::ArrayRef<const char *> extra_args_ref(extra_args.GetArgumentVector(),
                                              extra_args.GetArgumentCount());
  compiler_invocation.parseArgs(extra_args_ref,
                                swift_ast_sp->GetDiagnosticEngine());
}

// Apply source path remappings found in the target settings.
swift_ast_sp->RemapClangImporterOptions(target.GetSourcePathMap());

// This needs to happen once all the import paths are set, or otherwise no
// modules will be found.
if (!swift_ast_sp->GetClangImporter()) {
  if (log) {
    log->Printf("((Target*)%p)->GetSwiftASTContext() returning NULL - "
                "couldn't create a ClangImporter",
                &target);
  }
  return TypeSystemSP();
}

if (log) {
  log->Printf("((Target*)%p)->GetSwiftASTContext() = %p", &target,
              swift_ast_sp.get());
  swift_ast_sp->DumpConfiguration(log);
}

if (swift_ast_sp->HasFatalErrors()) {
  swift_ast_sp->m_error.SetErrorStringWithFormat(
      "Error creating target Swift AST context: %s",
      swift_ast_sp->GetFatalErrors().AsCString());
  return lldb::TypeSystemSP();
}

{
  const bool can_create = true;
  if (!swift_ast_sp->m_ast_context_ap->getStdlibModule(can_create)) {
    // We need to be able to load the standard library!
    return lldb::TypeSystemSP();
  }
}

return swift_ast_sp;
}

void SwiftASTContext::EnumerateSupportedLanguages(
    std::set<lldb::LanguageType> &languages_for_types,
    std::set<lldb::LanguageType> &languages_for_expressions) {
  static std::vector<lldb::LanguageType> s_supported_languages_for_types(
      {lldb::eLanguageTypeSwift});

  static std::vector<lldb::LanguageType> s_supported_languages_for_expressions(
      {lldb::eLanguageTypeSwift});

  languages_for_types.insert(s_supported_languages_for_types.begin(),
                             s_supported_languages_for_types.end());
  languages_for_expressions.insert(
      s_supported_languages_for_expressions.begin(),
      s_supported_languages_for_expressions.end());
}

static lldb::TypeSystemSP CreateTypeSystemInstance(lldb::LanguageType language,
                                                   Module *module,
                                                   Target *target,
                                                   const char *extra_options) {
  // This should be called with either a target or a module.
  if (module) {
    assert(!target);
    assert(StringRef(extra_options).empty());
    return SwiftASTContext::CreateInstance(language, *module);
  } else if (target) {
    assert(!module);
    return SwiftASTContext::CreateInstance(language, *target, extra_options);
  }
  llvm_unreachable("Neither a module nor a target was given");
}

void SwiftASTContext::Initialize() {
  PluginManager::RegisterPlugin(
      GetPluginNameStatic(), "swift AST context plug-in",
      CreateTypeSystemInstance, EnumerateSupportedLanguages);
}

void SwiftASTContext::Terminate() {
  PluginManager::UnregisterPlugin(CreateTypeSystemInstance);
}

bool SwiftASTContext::SupportsLanguage(lldb::LanguageType language) {
  return SwiftASTContextSupportsLanguage(language);
}

Status SwiftASTContext::IsCompatible() { return GetFatalErrors(); }

Status SwiftASTContext::GetFatalErrors() {
  Status error;
  if (HasFatalErrors()) {
    error = m_fatal_errors;
    if (error.Success()) {
      // Retrieve the error message from the DiagnosticConsumer.
      DiagnosticManager diagnostic_manager;
      PrintDiagnostics(diagnostic_manager);
      error.SetErrorString(diagnostic_manager.GetString());
    }
  }
  return error;
}

swift::IRGenOptions &SwiftASTContext::GetIRGenOptions() {
  return m_compiler_invocation_ap->getIRGenOptions();
}

std::string SwiftASTContext::GetTriple() const {
  return m_compiler_invocation_ap->getTargetTriple();
}

// Conditions a triple string to be safe for use with Swift.
// Right now this just strips the Haswell marker off the CPU name.
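// e.g. "x86_64h-apple-macosx10.12" becomes "x86_64-apple-macosx10.12".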
// TODO make Swift more robust static std::string GetSwiftFriendlyTriple(const std::string &triple) { static std::string s_x86_64h("x86_64h"); static std::string::size_type s_x86_64h_size = s_x86_64h.size(); if (0 == triple.compare(0, s_x86_64h_size, s_x86_64h)) { std::string fixed_triple("x86_64"); fixed_triple.append( triple.substr(s_x86_64h_size, triple.size() - s_x86_64h_size)); return fixed_triple; } return triple; } bool SwiftASTContext::SetTriple(const char *triple_cstr, Module *module) { if (triple_cstr && triple_cstr[0]) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); // We can change our triple up until we create the swift::irgen::IRGenModule if (m_ir_gen_module_ap.get() == NULL) { std::string raw_triple(triple_cstr); std::string triple = GetSwiftFriendlyTriple(raw_triple); llvm::Triple llvm_triple(triple); const unsigned unspecified = 0; // If the OS version is unspecified, do fancy things if (llvm_triple.getOSMajorVersion() == unspecified) { // If a triple is "<arch>-apple-darwin" change it to be // "<arch>-apple-macosx" otherwise the major and minor OS version we // append below would be wrong. if (llvm_triple.getVendor() == llvm::Triple::VendorType::Apple && llvm_triple.getOS() == llvm::Triple::OSType::Darwin) { llvm_triple.setOS(llvm::Triple::OSType::MacOSX); triple = llvm_triple.str(); } // Append the min OS to the triple if we have a target ModuleSP module_sp; if (module == NULL) { TargetSP target_sp(m_target_wp.lock()); if (target_sp) { module_sp = target_sp->GetExecutableModule(); if (module_sp) module = module_sp.get(); } } if (module) { ObjectFile *objfile = module->GetObjectFile(); uint32_t versions[3]; if (objfile) { uint32_t num_versions = objfile->GetMinimumOSVersion(versions, 3); StreamString strm; if (num_versions) { for (uint32_t v = 0; v < 3; ++v) { if (v < num_versions) { if (versions[v] == UINT32_MAX) versions[v] = 0; } else versions[v] = 0; } strm.Printf("%s%u.%u.%u", llvm_triple.getOSName().str().c_str(), versions[0], versions[1], versions[2]); llvm_triple.setOSName(strm.GetString()); triple = llvm_triple.str(); } } } } if (log) log->Printf("%p: SwiftASTContext::SetTriple('%s') setting to '%s'%s", this, triple_cstr, triple.c_str(), m_target_wp.lock() ? " (target)" : ""); m_compiler_invocation_ap->setTargetTriple(triple); // Every time the triple is changed the LangOpts must be // updated too, because Swift default-initializes the // EnableObjCInterop flag based on the triple. 
GetLanguageOptions().EnableObjCInterop = llvm_triple.isOSDarwin(); return true; } else { if (log) log->Printf("%p: SwiftASTContext::SetTriple('%s') ignoring triple " "since the IRGenModule has already been created", this, triple_cstr); } } return false; } static std::string GetXcodeContentsPath() { const char substr[] = ".app/Contents/"; // First, try based on the current shlib's location { FileSpec fspec; if (HostInfo::GetLLDBPath(ePathTypeLLDBShlibDir, fspec)) { std::string path_to_shlib = fspec.GetPath(); size_t pos = path_to_shlib.rfind(substr); if (pos != std::string::npos) { path_to_shlib.erase(pos + strlen(substr)); return path_to_shlib; } } } // Fall back to using xcrun { int status = 0; int signo = 0; std::string output; const char *command = "xcrun -sdk macosx --show-sdk-path"; lldb_private::Status error = Host::RunShellCommand( command, // shell command to run NULL, // current working directory &status, // Put the exit status of the process in here &signo, // Put the signal that caused the process to exit in here &output, // Get the output from the command and place it in this string 3); // Timeout in seconds to wait for shell program to finish if (status == 0 && !output.empty()) { size_t first_non_newline = output.find_last_not_of("\r\n"); if (first_non_newline != std::string::npos) { output.erase(first_non_newline + 1); } size_t pos = output.rfind(substr); if (pos != std::string::npos) { output.erase(pos + strlen(substr)); return output; } } } return std::string(); } static std::string GetCurrentToolchainPath() { const char substr[] = ".xctoolchain/"; { FileSpec fspec; if (HostInfo::GetLLDBPath(ePathTypeLLDBShlibDir, fspec)) { std::string path_to_shlib = fspec.GetPath(); size_t pos = path_to_shlib.rfind(substr); if (pos != std::string::npos) { path_to_shlib.erase(pos + strlen(substr)); return path_to_shlib; } } } return std::string(); } static std::string GetCurrentCLToolsPath() { const char substr[] = "/CommandLineTools/"; { FileSpec fspec; if (HostInfo::GetLLDBPath(ePathTypeLLDBShlibDir, fspec)) { std::string path_to_shlib = fspec.GetPath(); size_t pos = path_to_shlib.rfind(substr); if (pos != std::string::npos) { path_to_shlib.erase(pos + strlen(substr)); return path_to_shlib; } } } return std::string(); } namespace { enum class SDKType { MacOSX = 0, iPhoneSimulator, iPhoneOS, AppleTVSimulator, AppleTVOS, WatchSimulator, watchOS, numSDKTypes, unknown = -1 }; const char *const sdk_strings[] = { "macosx", "iphonesimulator", "iphoneos", "appletvsimulator", "appletvos", "watchsimulator", "watchos", }; struct SDKEnumeratorInfo { FileSpec found_path; SDKType sdk_type; uint32_t least_major; uint32_t least_minor; }; static bool SDKSupportsSwift(const FileSpec &sdk_path, SDKType desired_type) { ConstString last_path_component = sdk_path.GetLastPathComponent(); if (last_path_component) { const llvm::StringRef sdk_name_raw = last_path_component.GetStringRef(); std::string sdk_name_lower = sdk_name_raw.lower(); const llvm::StringRef sdk_name(sdk_name_lower); llvm::StringRef version_part; SDKType sdk_type = SDKType::unknown; if (desired_type == SDKType::unknown) { for (int i = (int)SDKType::MacOSX; i < (int)SDKType::numSDKTypes; ++i) { if (sdk_name.startswith(sdk_strings[i])) { version_part = sdk_name.drop_front(strlen(sdk_strings[i])); sdk_type = (SDKType)i; break; } } // For non-Darwin SDKs assume Swift is supported if (sdk_type == SDKType::unknown) return true; } else { if (sdk_name.startswith(sdk_strings[(int)desired_type])) { version_part = 
sdk_name.drop_front(strlen(sdk_strings[(int)desired_type])); sdk_type = desired_type; } else { return false; } } const size_t major_dot_offset = version_part.find('.'); if (major_dot_offset == llvm::StringRef::npos) return false; const llvm::StringRef major_version = version_part.slice(0, major_dot_offset); const llvm::StringRef minor_part = version_part.drop_front(major_dot_offset + 1); const size_t minor_dot_offset = minor_part.find('.'); if (minor_dot_offset == llvm::StringRef::npos) return false; const llvm::StringRef minor_version = minor_part.slice(0, minor_dot_offset); unsigned int major = 0; unsigned int minor = 0; if (major_version.getAsInteger(10, major)) return false; if (minor_version.getAsInteger(10, minor)) return false; switch (sdk_type) { case SDKType::MacOSX: if (major > 10 || (major == 10 && minor >= 10)) return true; break; case SDKType::iPhoneOS: case SDKType::iPhoneSimulator: if (major >= 8) return true; break; case SDKType::AppleTVSimulator: case SDKType::AppleTVOS: if (major >= 9) return true; break; case SDKType::WatchSimulator: case SDKType::watchOS: if (major >= 2) return true; break; default: return false; } } return false; } FileSpec::EnumerateDirectoryResult DirectoryEnumerator(void *baton, llvm::sys::fs::file_type file_type, const FileSpec &spec) { SDKEnumeratorInfo *enumerator_info = static_cast<SDKEnumeratorInfo *>(baton); if (SDKSupportsSwift(spec, enumerator_info->sdk_type)) { enumerator_info->found_path = spec; return FileSpec::EnumerateDirectoryResult::eEnumerateDirectoryResultNext; } return FileSpec::EnumerateDirectoryResult::eEnumerateDirectoryResultNext; }; static ConstString EnumerateSDKsForVersion(FileSpec sdks_spec, SDKType sdk_type, uint32_t least_major, uint32_t least_minor) { if (!IsDirectory(sdks_spec)) return ConstString(); const bool find_directories = true; const bool find_files = false; const bool find_other = true; // include symlinks SDKEnumeratorInfo enumerator_info; enumerator_info.sdk_type = sdk_type; enumerator_info.least_major = least_major; enumerator_info.least_minor = least_minor; FileSpec::EnumerateDirectory(sdks_spec.GetPath().c_str(), find_directories, find_files, find_other, DirectoryEnumerator, &enumerator_info); if (IsDirectory(enumerator_info.found_path)) return ConstString(enumerator_info.found_path.GetPath()); else return ConstString(); } static ConstString GetSDKDirectory(SDKType sdk_type, uint32_t least_major, uint32_t least_minor) { if (sdk_type != SDKType::MacOSX) { // Look inside Xcode for the required installed iOS SDK version std::string sdks_path = GetXcodeContentsPath(); sdks_path.append("Developer/Platforms"); if (sdk_type == SDKType::iPhoneSimulator) { sdks_path.append("/iPhoneSimulator.platform/"); } else if (sdk_type == SDKType::AppleTVSimulator) { sdks_path.append("/AppleTVSimulator.platform/"); } else if (sdk_type == SDKType::AppleTVOS) { sdks_path.append("/AppleTVOS.platform/"); } else if (sdk_type == SDKType::WatchSimulator) { sdks_path.append("/WatchSimulator.platform/"); } else if (sdk_type == SDKType::watchOS) { // For now, we need to be prepared to handle either capitalization of this // path. 
std::string WatchOS_candidate_path = sdks_path + "/WatchOS.platform/";
if (IsDirectory(FileSpec(WatchOS_candidate_path.c_str(), false))) {
  sdks_path = WatchOS_candidate_path;
} else {
  std::string watchOS_candidate_path = sdks_path + "/watchOS.platform/";
  if (IsDirectory(FileSpec(watchOS_candidate_path.c_str(), false))) {
    sdks_path = watchOS_candidate_path;
  } else {
    return ConstString();
  }
}
} else {
  sdks_path.append("/iPhoneOS.platform/");
}

sdks_path.append("Developer/SDKs/");

FileSpec sdks_spec(sdks_path.c_str(), false);

return EnumerateSDKsForVersion(sdks_spec, sdk_type, least_major,
                               least_minor);
}

// The SDK type is Mac OS X

uint32_t major = 0;
uint32_t minor = 0;
uint32_t update = 0;

if (!HostInfo::GetOSVersion(major, minor, update))
  return ConstString();

// If there are minimum requirements that exceed the current OS, apply those

if (least_major > major) {
  major = least_major;
  minor = least_minor;
} else if (least_major == major) {
  if (least_minor > minor)
    minor = least_minor;
}

typedef std::map<uint64_t, ConstString> SDKDirectoryCache;
static std::mutex g_mutex;
static SDKDirectoryCache g_sdk_cache;
std::lock_guard<std::mutex> locker(g_mutex);
const uint64_t major_minor = (uint64_t)major << 32 | (uint64_t)minor;
SDKDirectoryCache::iterator pos = g_sdk_cache.find(major_minor);
if (pos != g_sdk_cache.end())
  return pos->second;

FileSpec fspec;
std::string xcode_contents_path = GetXcodeContentsPath();

if (!xcode_contents_path.empty()) {
  StreamString sdk_path;
  sdk_path.Printf(
      "%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/MacOSX%u.%u.sdk",
      xcode_contents_path.c_str(), major, minor);
  fspec.SetFile(sdk_path.GetString(), false);
  if (fspec.Exists()) {
    ConstString path(sdk_path.GetString());
    // Cache results
    g_sdk_cache[major_minor] = path;
    return path;
  } else if ((least_major != major) || (least_minor != minor)) {
    // Try the required SDK
    sdk_path.Clear();
    sdk_path.Printf("%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/"
                    "MacOSX%u.%u.sdk",
                    xcode_contents_path.c_str(), least_major, least_minor);
    fspec.SetFile(sdk_path.GetString(), false);
    if (fspec.Exists()) {
      ConstString path(sdk_path.GetString());
      // Cache results
      g_sdk_cache[major_minor] = path;
      return path;
    } else {
      // Okay, we're going to do an exhaustive search for *any* SDK that has
      // an adequate version.

      std::string sdks_path = GetXcodeContentsPath();
      sdks_path.append("Developer/Platforms/MacOSX.platform/Developer/SDKs");

      FileSpec sdks_spec(sdks_path.c_str(), false);

      ConstString sdk_path = EnumerateSDKsForVersion(
          sdks_spec, sdk_type, least_major, least_minor);

      if (sdk_path) {
        g_sdk_cache[major_minor] = sdk_path;
        return sdk_path;
      }
    }
  }
}

// Cache results
g_sdk_cache[major_minor] = ConstString();
return ConstString();
}

static ConstString GetResourceDir() {
  static ConstString g_cached_resource_dir;
  static std::once_flag g_once_flag;
  std::call_once(g_once_flag, []() {
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));

    // First, check if there's something in our bundle
    {
      FileSpec swift_dir_spec;
      if (HostInfo::GetLLDBPath(ePathTypeSwiftDir, swift_dir_spec)) {
        if (log)
          log->Printf("%s: trying ePathTypeSwiftDir: %s", __FUNCTION__,
                      swift_dir_spec.GetCString());

        // We can't just check for the Swift directory, because that
        // always exists. We have to look for "clang" inside that.
FileSpec swift_clang_dir_spec = swift_dir_spec;
swift_clang_dir_spec.AppendPathComponent("clang");
if (IsDirectory(swift_clang_dir_spec)) {
  g_cached_resource_dir = ConstString(swift_dir_spec.GetPath());
  if (log)
    log->Printf("%s: found Swift resource dir via "
                "'ePathTypeSwiftDir': %s",
                __FUNCTION__, g_cached_resource_dir.AsCString());
  return;
}
}
}

// Nothing in our bundle. Are we in a toolchain that has its own Swift
// compiler resource dir?
{
  std::string xcode_toolchain_path = GetCurrentToolchainPath();
  if (log)
    log->Printf("%s: trying toolchain path: %s", __FUNCTION__,
                xcode_toolchain_path.c_str());

  if (!xcode_toolchain_path.empty()) {
    xcode_toolchain_path.append("usr/lib/swift");
    if (log)
      log->Printf("%s: trying toolchain-based lib path: %s", __FUNCTION__,
                  xcode_toolchain_path.c_str());

    if (IsDirectory(FileSpec(xcode_toolchain_path, false))) {
      g_cached_resource_dir = ConstString(xcode_toolchain_path);
      if (log)
        log->Printf("%s: found Swift resource dir via "
                    "toolchain path + 'usr/lib/swift': %s",
                    __FUNCTION__, g_cached_resource_dir.AsCString());
      return;
    }
  }
}

// We're not in a toolchain that has one. Use the Xcode default toolchain.
{
  std::string xcode_contents_path = GetXcodeContentsPath();
  if (log)
    log->Printf("%s: trying Xcode path: %s", __FUNCTION__,
                xcode_contents_path.c_str());

  if (!xcode_contents_path.empty()) {
    xcode_contents_path.append("Developer/Toolchains/"
                               "XcodeDefault.xctoolchain"
                               "/usr/lib/swift");
    if (log)
      log->Printf("%s: trying Xcode-based lib path: %s", __FUNCTION__,
                  xcode_contents_path.c_str());

    if (IsDirectory(FileSpec(xcode_contents_path, false))) {
      g_cached_resource_dir = ConstString(xcode_contents_path);
      if (log)
        log->Printf("%s: found Swift resource dir via "
                    "Xcode contents path + default toolchain "
                    "relative dir: %s",
                    __FUNCTION__, g_cached_resource_dir.AsCString());
      return;
    }
  }
}

// We're not in Xcode. We might be in the command-line tools.
{
  std::string cl_tools_path = GetCurrentCLToolsPath();
  if (log)
    log->Printf("%s: trying command-line tools path: %s", __FUNCTION__,
                cl_tools_path.c_str());

  if (!cl_tools_path.empty()) {
    cl_tools_path.append("usr/lib/swift");
    if (log)
      log->Printf("%s: trying command-line tools-based lib "
                  "path: %s",
                  __FUNCTION__, cl_tools_path.c_str());

    if (IsDirectory(FileSpec(cl_tools_path, false))) {
      g_cached_resource_dir = ConstString(cl_tools_path);
      if (log)
        log->Printf("%s: found Swift resource dir via "
                    "command-line tools path + "
                    "usr/lib/swift: %s",
                    __FUNCTION__, g_cached_resource_dir.AsCString());
      return;
    }
  }
}

// We might be in the build-dir configuration for a build-script-driven
// LLDB build, which has the Swift build dir as a sibling directory
// to the lldb build dir. This looks much different than the install-
// dir layout that the previous checks would try.
{
  FileSpec faux_swift_dir_spec;
  if (HostInfo::GetLLDBPath(ePathTypeSwiftDir, faux_swift_dir_spec)) {
    // We can't use a C++11 stdlib regex feature here because it
    // doesn't work on Ubuntu 14.04 x86_64. Once we don't care
    // about supporting that anymore, let's pull the code below
    // back in since it is a simpler implementation using
    // std::regex.
#if 0
    // Let's try to regex this.
    // We're looking for /some/path/lldb-{os}-{arch}, and want to
    // build the following:
    // /some/path/swift-{os}-{arch}/lib/swift/{os}/{arch}
    // In a match, these are the following assignments for
    // backrefs:
    //   $1 - first part of path before swift build dir
    //   $2 - the host OS path separator character
    //   $3 - all the stuff that should come after changing
    //        lldb to swift for the lib dir.
auto match_regex = std::regex("^(.+([/\\\\]))lldb-(.+)$"); const std::string replace_format = "$1swift-$3"; const std::string faux_swift_dir = faux_swift_dir_spec.GetCString(); const std::string build_tree_resource_dir = std::regex_replace(faux_swift_dir, match_regex, replace_format); #else std::string build_tree_resource_dir; const std::string faux_swift_dir = faux_swift_dir_spec.GetCString(); // Find something that matches lldb- (particularly, // the last one). const std::string lldb_dash("lldb-"); auto lldb_pos = faux_swift_dir.rfind(lldb_dash); if ((lldb_pos != std::string::npos) && (lldb_pos > 0) && ((faux_swift_dir[lldb_pos - 1] == '\\') || (faux_swift_dir[lldb_pos - 1] == '/'))) { // We found something that matches ^.+[/\\]lldb-.+$ std::ostringstream stream; // Take everything before lldb- (the path leading up to // the lldb dir). stream << faux_swift_dir.substr(0, lldb_pos); // replace lldb- with swift-. stream << "swift-"; // and now tack on the same components from after // the lldb- part. stream << faux_swift_dir.substr(lldb_pos + lldb_dash.length()); const std::string build_tree_resource_dir = stream.str(); if (log) log->Printf("%s: trying ePathTypeSwiftDir regex-based " "build dir: %s", __FUNCTION__, build_tree_resource_dir.c_str()); FileSpec swift_resource_dir_spec( build_tree_resource_dir.c_str(), false); if (IsDirectory(swift_resource_dir_spec)) { g_cached_resource_dir = ConstString(swift_resource_dir_spec.GetPath()); if (log) log->Printf("%s: found Swift resource dir via " "ePathTypeSwiftDir + inferred " "build-tree dir: %s", __FUNCTION__, g_cached_resource_dir.AsCString()); return; } } #endif } } // We failed to find a reasonable Swift resource dir. if (log) log->Printf("%s: failed to find a Swift resource dir", __FUNCTION__); }); return g_cached_resource_dir; } } // anonymous namespace swift::CompilerInvocation &SwiftASTContext::GetCompilerInvocation() { return *m_compiler_invocation_ap; } swift::SourceManager &SwiftASTContext::GetSourceManager() { if (m_source_manager_ap.get() == NULL) m_source_manager_ap.reset(new swift::SourceManager()); return *m_source_manager_ap; } swift::LangOptions &SwiftASTContext::GetLanguageOptions() { return GetCompilerInvocation().getLangOptions(); } swift::DiagnosticEngine &SwiftASTContext::GetDiagnosticEngine() { if (m_diagnostic_engine_ap.get() == NULL) m_diagnostic_engine_ap.reset( new swift::DiagnosticEngine(GetSourceManager())); return *m_diagnostic_engine_ap; } // This code comes from CompilerInvocation.cpp (setRuntimeResourcePath) static void ConfigureResourceDirs(swift::CompilerInvocation &invocation, FileSpec resource_dir, llvm::Triple triple) { // Make sure the triple is right: invocation.setTargetTriple(triple.str()); invocation.setRuntimeResourcePath(resource_dir.GetPath().c_str()); } swift::SILOptions &SwiftASTContext::GetSILOptions() { return GetCompilerInvocation().getSILOptions(); } bool SwiftASTContext::TargetHasNoSDK() { llvm::Triple triple(GetTriple()); switch (triple.getOS()) { case llvm::Triple::OSType::MacOSX: case llvm::Triple::OSType::Darwin: case llvm::Triple::OSType::IOS: return false; default: return true; } } swift::ClangImporterOptions &SwiftASTContext::GetClangImporterOptions() { swift::ClangImporterOptions &clang_importer_options = GetCompilerInvocation().getClangImporterOptions(); if (!m_initialized_clang_importer_options) { m_initialized_clang_importer_options = true; // Set the Clang module search path. 
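// Reuse LLDB's global Clang module cache so the importer shares on-disk
// modules with the rest of the debugger instead of rebuilding them.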
llvm::SmallString<128> path; auto props = ModuleList::GetGlobalModuleListProperties(); props.GetClangModulesCachePath().GetPath(path); clang_importer_options.ModuleCachePath = path.str(); FileSpec clang_dir_spec; if (HostInfo::GetLLDBPath(ePathTypeClangDir, clang_dir_spec)) clang_importer_options.OverrideResourceDir = std::move(clang_dir_spec.GetPath()); clang_importer_options.DebuggerSupport = true; } return clang_importer_options; } swift::SearchPathOptions &SwiftASTContext::GetSearchPathOptions() { swift::SearchPathOptions &search_path_opts = GetCompilerInvocation().getSearchPathOptions(); if (!m_initialized_search_path_options) { m_initialized_search_path_options = true; bool set_sdk = false; bool set_resource_dir = false; if (!search_path_opts.SDKPath.empty()) { FileSpec provided_sdk_path(search_path_opts.SDKPath, false); if (provided_sdk_path.Exists()) { // We don't check whether the SDK supports swift because we figure if // someone is passing this to us on the command line (e.g., for the // REPL), they probably know what they're doing. set_sdk = true; } } else if (!m_platform_sdk_path.empty()) { FileSpec platform_sdk(m_platform_sdk_path.c_str(), false); if (platform_sdk.Exists() && SDKSupportsSwift(platform_sdk, SDKType::unknown)) { search_path_opts.SDKPath = m_platform_sdk_path.c_str(); set_sdk = true; } } llvm::Triple triple(GetTriple()); if (!m_resource_dir.empty()) { FileSpec resource_dir(m_resource_dir.c_str(), false); if (resource_dir.Exists()) { ConfigureResourceDirs(GetCompilerInvocation(), resource_dir, triple); set_resource_dir = true; } } auto is_simulator = [&]() -> bool { return triple.getEnvironment() == llvm::Triple::Simulator || !triple.getArchName().startswith("arm"); }; if (!set_sdk) { switch (triple.getOS()) { case llvm::Triple::OSType::MacOSX: case llvm::Triple::OSType::Darwin: search_path_opts.SDKPath = GetSDKDirectory(SDKType::MacOSX, 10, 10) .AsCString(""); break; case llvm::Triple::OSType::IOS: search_path_opts.SDKPath = is_simulator() ? GetSDKDirectory(SDKType::iPhoneSimulator, 8, 0).AsCString("") : GetSDKDirectory(SDKType::iPhoneOS, 8, 0).AsCString(""); break; case llvm::Triple::OSType::TvOS: search_path_opts.SDKPath = is_simulator() ? GetSDKDirectory(SDKType::AppleTVSimulator, 9, 0).AsCString("") : GetSDKDirectory(SDKType::AppleTVOS, 9, 0).AsCString(""); break; case llvm::Triple::OSType::WatchOS: search_path_opts.SDKPath = is_simulator() ? GetSDKDirectory(SDKType::WatchSimulator, 2, 0).AsCString("") : GetSDKDirectory(SDKType::watchOS, 2, 0).AsCString(""); break; default: // Explicitly leave the SDKPath blank on other platforms. break; } } if (!set_resource_dir) { FileSpec resource_dir(::GetResourceDir().AsCString(""), false); if (resource_dir.Exists()) ConfigureResourceDirs(GetCompilerInvocation(), resource_dir, triple); } } return search_path_opts; } namespace lldb_private { class ANSIColorStringStream : public llvm::raw_string_ostream { public: ANSIColorStringStream(bool colorize) : llvm::raw_string_ostream(m_buffer), m_colorize(colorize) {} /// Changes the foreground color of text that will be output from this point /// forward. 
  /// @param colors ANSI color to use, the special SAVEDCOLOR can be used to
  /// change only the bold attribute, and keep colors untouched
  /// @param bold bold/brighter text, default false
  /// @param bg if true change the background, default: change foreground
  /// @returns itself so it can be used within << invocations
  virtual raw_ostream &changeColor(enum Colors colors, bool bold = false,
                                   bool bg = false) {
    if (llvm::sys::Process::ColorNeedsFlush())
      flush();
    const char *colorcode;
    if (colors == SAVEDCOLOR)
      colorcode = llvm::sys::Process::OutputBold(bg);
    else
      colorcode = llvm::sys::Process::OutputColor(colors, bold, bg);
    if (colorcode) {
      size_t len = strlen(colorcode);
      write(colorcode, len);
    }
    return *this;
  }

  /// Resets the colors to terminal defaults. Call this when you are done
  /// outputting colored text, or before program exit.
  virtual raw_ostream &resetColor() {
    if (llvm::sys::Process::ColorNeedsFlush())
      flush();
    const char *colorcode = llvm::sys::Process::ResetColor();
    if (colorcode) {
      size_t len = strlen(colorcode);
      write(colorcode, len);
    }
    return *this;
  }

  /// Reverses the foreground and background colors.
  virtual raw_ostream &reverseColor() {
    if (llvm::sys::Process::ColorNeedsFlush())
      flush();
    const char *colorcode = llvm::sys::Process::OutputReverse();
    if (colorcode) {
      size_t len = strlen(colorcode);
      write(colorcode, len);
    }
    return *this;
  }

  /// This function determines if this stream is connected to a "tty" or
  /// "console" window. That is, the output would be displayed to the user
  /// rather than being put on a pipe or stored in a file.
  virtual bool is_displayed() const { return m_colorize; }

  /// This function determines if this stream is displayed and supports colors.
  virtual bool has_colors() const { return m_colorize; }

protected:
  std::string m_buffer;
  bool m_colorize;
};

class StoringDiagnosticConsumer : public swift::DiagnosticConsumer {
public:
  StoringDiagnosticConsumer(SwiftASTContext &ast_context)
      : m_ast_context(ast_context), m_diagnostics(), m_num_errors(0),
        m_colorize(false) {
    m_ast_context.GetDiagnosticEngine().resetHadAnyError();
    m_ast_context.GetDiagnosticEngine().addConsumer(*this);
  }

  ~StoringDiagnosticConsumer() {
    m_ast_context.GetDiagnosticEngine().takeConsumers();
  }

  virtual void handleDiagnostic(
      swift::SourceManager &source_mgr, swift::SourceLoc source_loc,
      swift::DiagnosticKind kind, llvm::StringRef formatString,
      llvm::ArrayRef<swift::DiagnosticArgument> formatArgs,
      const swift::DiagnosticInfo &info) {
    llvm::StringRef bufferName = "<anonymous>";
    unsigned bufferID = 0;
    std::pair<unsigned, unsigned> line_col = {0, 0};

    llvm::SmallString<256> text;
    {
      llvm::raw_svector_ostream out(text);
      swift::DiagnosticEngine::formatDiagnosticText(out, formatString,
                                                    formatArgs);
    }

    if (source_loc.isValid()) {
      bufferID = source_mgr.findBufferContainingLoc(source_loc);
      bufferName = source_mgr.getBufferIdentifierForLoc(source_loc);
      line_col = source_mgr.getLineAndColumn(source_loc);
    }

    if (line_col.first != 0) {
      ANSIColorStringStream os(m_colorize);

      // Determine what kind of diagnostic we're emitting, and whether we want
      // to use its fixits:
      bool use_fixits = false;
      llvm::SourceMgr::DiagKind source_mgr_kind;
      switch (kind) {
      default:
      case swift::DiagnosticKind::Error:
        source_mgr_kind = llvm::SourceMgr::DK_Error;
        use_fixits = true;
        break;
      case swift::DiagnosticKind::Warning:
        source_mgr_kind = llvm::SourceMgr::DK_Warning;
        break;
      case swift::DiagnosticKind::Note:
        source_mgr_kind = llvm::SourceMgr::DK_Note;
        break;
      }

      // Translate ranges.
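      // Swift source ranges and fix-its are converted below to their
      // llvm::SourceMgr equivalents so PrintMessage can render the familiar
      // clang-style caret diagnostics into the stream above.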
      llvm::SmallVector<llvm::SMRange, 2> ranges;
      for (auto R : info.Ranges)
        ranges.push_back(getRawRange(source_mgr, R));

      // Translate fix-its.
      llvm::SmallVector<llvm::SMFixIt, 2> fix_its;
      for (swift::DiagnosticInfo::FixIt F : info.FixIts)
        fix_its.push_back(getRawFixIt(source_mgr, F));

      // Display the diagnostic.
      auto message = source_mgr.GetMessage(source_loc, source_mgr_kind, text,
                                           ranges, fix_its);
      source_mgr.getLLVMSourceMgr().PrintMessage(os, message);

      // Use the llvm::raw_string_ostream::str() accessor: it flushes the
      // stream into the underlying buffer and returns a reference to that
      // string.
      std::string &message_ref = os.str();

      if (message_ref.empty())
        m_diagnostics.push_back(RawDiagnostic(
            text.str(), kind, bufferName, bufferID, line_col.first,
            line_col.second,
            use_fixits ? info.FixIts
                       : llvm::ArrayRef<swift::Diagnostic::FixIt>()));
      else
        m_diagnostics.push_back(RawDiagnostic(
            message_ref, kind, bufferName, bufferID, line_col.first,
            line_col.second,
            use_fixits ? info.FixIts
                       : llvm::ArrayRef<swift::Diagnostic::FixIt>()));
    } else {
      m_diagnostics.push_back(RawDiagnostic(
          text.str(), kind, bufferName, bufferID, line_col.first,
          line_col.second, llvm::ArrayRef<swift::Diagnostic::FixIt>()));
    }

    if (kind == swift::DiagnosticKind::Error)
      m_num_errors++;
  }

  void Clear() {
    m_ast_context.GetDiagnosticEngine().resetHadAnyError();
    m_diagnostics.clear();
    m_num_errors = 0;
  }

  unsigned NumErrors() {
    if (m_num_errors)
      return m_num_errors;
    else if (m_ast_context.GetASTContext()->hadError())
      return 1;
    else
      return 0;
  }

  static DiagnosticSeverity SeverityForKind(swift::DiagnosticKind kind) {
    switch (kind) {
    case swift::DiagnosticKind::Error:
      return eDiagnosticSeverityError;
    case swift::DiagnosticKind::Warning:
      return eDiagnosticSeverityWarning;
    case swift::DiagnosticKind::Note:
      return eDiagnosticSeverityRemark;
    }

    llvm_unreachable("Unhandled DiagnosticKind in switch.");
  }

  void PrintDiagnostics(DiagnosticManager &diagnostic_manager,
                        uint32_t bufferID = UINT32_MAX,
                        uint32_t first_line = 0,
                        uint32_t last_line = UINT32_MAX,
                        uint32_t line_offset = 0) {
    bool added_one_diagnostic = false;
    for (const RawDiagnostic &diagnostic : m_diagnostics) {
      // We often take the user's expression text and wrap it in some other
      // code. When we see errors we want the line numbers to refer to the
      // user's text, so we correct them below. LLVM stores locations in
      // SourceLoc objects as character offsets, so there is no way to get
      // LLVM to move its error line numbers around by adjusting the source
      // location; we must do it manually. We also want to use the same error
      // formatting as LLVM and Clang, so we must muck with the string.
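      // For illustration: with a matching bufferID, first_line = 10 and
      // line_offset = 2, a diagnostic recorded at <expr>:12:5 is rewritten
      // below to line 5 (12 - 10 + 2 + 1), i.e. relative to the user's own
      // expression text rather than to the wrapped source that was compiled.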
      const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
      const DiagnosticOrigin origin = eDiagnosticOriginSwift;
      if (first_line > 0 && bufferID != UINT32_MAX &&
          diagnostic.bufferID == bufferID && !diagnostic.bufferName.empty()) {
        // Make sure the error line is in range.
        if (diagnostic.line >= first_line && diagnostic.line <= last_line) {
          // Need to remap the error/warning to a different line.
          StreamString match;
          match.Printf("%s:%u:", diagnostic.bufferName.str().c_str(),
                       diagnostic.line);
          const size_t match_len = match.GetString().size();
          size_t match_pos = diagnostic.description.find(match.GetString());
          if (match_pos != std::string::npos) {
            // We have some <file>:<line>: instances that need to be updated.
            StreamString fixed_description;
            size_t start_pos = 0;
            do {
              if (match_pos > start_pos)
                fixed_description.Printf(
                    "%s", diagnostic.description
                              .substr(start_pos, match_pos - start_pos)
                              .c_str());
              fixed_description.Printf(
                  "%s:%u:", diagnostic.bufferName.str().c_str(),
                  diagnostic.line - first_line + line_offset + 1);
              start_pos = match_pos + match_len;
              match_pos =
                  diagnostic.description.find(match.GetString(), start_pos);
            } while (match_pos != std::string::npos);

            // Append any remaining text.
            if (start_pos < diagnostic.description.size())
              fixed_description.Printf(
                  "%s", diagnostic.description
                            .substr(start_pos, diagnostic.description.size() -
                                                   start_pos)
                            .c_str());

            SwiftDiagnostic *new_diagnostic =
                new SwiftDiagnostic(fixed_description.GetString().data(),
                                    severity, origin, bufferID);
            for (auto fixit : diagnostic.fixits)
              new_diagnostic->AddFixIt(fixit);

            diagnostic_manager.AddDiagnostic(new_diagnostic);
            added_one_diagnostic = true;

            continue;
          }
        }
      }
    }

    // In general, we don't want to see diagnostics from outside of the source
    // text range of the actual user expression. But if we didn't find any
    // diagnostics in the text range, it's probably because the source range
    // was not specified correctly, and we don't want to lose legit errors
    // because of that. So in that case we'll add them all here:
    if (!added_one_diagnostic) {
      // This reports diagnostics from outside the expression's source range.
      // They are usually not interesting to users, but since none matched the
      // expression's own text, emit them all rather than swallow a legitimate
      // error.
      for (const RawDiagnostic &diagnostic : m_diagnostics) {
        const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
        const DiagnosticOrigin origin = eDiagnosticOriginSwift;
        diagnostic_manager.AddDiagnostic(diagnostic.description.c_str(),
                                         severity, origin);
      }
    }
  }

  bool GetColorize() const { return m_colorize; }

  bool SetColorize(bool b) {
    const bool old = m_colorize;
    m_colorize = b;
    return old;
  }

private:
  // We don't currently use lldb_private::Diagnostic or any of the lldb
  // DiagnosticManager machinery to store diagnostics as they occur. Instead,
  // we store them in raw form using this struct, then transcode them to
  // SwiftDiagnostics in PrintDiagnostics.
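  // A RawDiagnostic keeps the rendered message together with enough source
  // information (buffer name and ID, line, column, fix-its) for
  // PrintDiagnostics to re-target it later.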
struct RawDiagnostic { RawDiagnostic(std::string in_desc, swift::DiagnosticKind in_kind, llvm::StringRef in_bufferName, unsigned in_bufferID, uint32_t in_line, uint32_t in_column, llvm::ArrayRef<swift::Diagnostic::FixIt> in_fixits) : description(in_desc), kind(in_kind), bufferName(in_bufferName), bufferID(in_bufferID), line(in_line), column(in_column) { for (auto fixit : in_fixits) { fixits.push_back(fixit); } } std::string description; swift::DiagnosticKind kind; const llvm::StringRef bufferName; unsigned bufferID; uint32_t line; uint32_t column; std::vector<swift::DiagnosticInfo::FixIt> fixits; }; typedef std::vector<RawDiagnostic> RawDiagnosticBuffer; SwiftASTContext &m_ast_context; RawDiagnosticBuffer m_diagnostics; unsigned m_num_errors = 0; bool m_colorize; }; } swift::ASTContext *SwiftASTContext::GetASTContext() { if (m_ast_context_ap.get() == NULL) { m_ast_context_ap.reset( new swift::ASTContext(GetLanguageOptions(), GetSearchPathOptions(), GetSourceManager(), GetDiagnosticEngine())); m_diagnostic_consumer_ap.reset(new StoringDiagnosticConsumer(*this)); if (getenv("LLDB_SWIFT_DUMP_DIAGS")) { // NOTE: leaking a swift::PrintingDiagnosticConsumer() here, but this only // gets enabled when the above environment variable is set. GetDiagnosticEngine().addConsumer( *new swift::PrintingDiagnosticConsumer()); } // Install the serialized module loader std::unique_ptr<swift::ModuleLoader> serialized_module_loader_ap( swift::SerializedModuleLoader::create(*m_ast_context_ap)); if (serialized_module_loader_ap) { m_serialized_module_loader = (swift::SerializedModuleLoader *)serialized_module_loader_ap.get(); m_ast_context_ap->addModuleLoader(std::move(serialized_module_loader_ap)); } GetASTMap().Insert(m_ast_context_ap.get(), this); } VALID_OR_RETURN(nullptr); return m_ast_context_ap.get(); } swift::SerializedModuleLoader *SwiftASTContext::GetSerializeModuleLoader() { VALID_OR_RETURN(nullptr); GetASTContext(); return m_serialized_module_loader; } swift::ClangImporter *SwiftASTContext::GetClangImporter() { VALID_OR_RETURN(nullptr); if (m_clang_importer == NULL) { swift::ASTContext *ast_ctx = GetASTContext(); if (!ast_ctx) { return nullptr; } // Install the Clang module loader TargetSP target_sp(m_target_wp.lock()); if (true /*target_sp*/) { // PlatformSP platform_sp = target_sp->GetPlatform(); if (true /*platform_sp*/) { if (!ast_ctx->SearchPathOpts.SDKPath.empty() || TargetHasNoSDK()) { swift::ClangImporterOptions &clang_importer_options = GetClangImporterOptions(); if (!clang_importer_options.OverrideResourceDir.empty()) { std::unique_ptr<swift::ModuleLoader> clang_importer_ap( swift::ClangImporter::create(*m_ast_context_ap, clang_importer_options)); if (clang_importer_ap) { const bool isClang = true; m_clang_importer = (swift::ClangImporter *)clang_importer_ap.get(); m_ast_context_ap->addModuleLoader(std::move(clang_importer_ap), isClang); } } } } } } return m_clang_importer; } bool SwiftASTContext::AddModuleSearchPath(const char *path) { VALID_OR_RETURN(false); if (path && path[0]) { swift::ASTContext *ast = GetASTContext(); std::string path_str(path); bool add_search_path = true; for (auto path : ast->SearchPathOpts.ImportSearchPaths) { if (path == path_str) { add_search_path = false; break; } } if (add_search_path) { ast->SearchPathOpts.ImportSearchPaths.push_back(path); return true; } } return false; } bool SwiftASTContext::AddFrameworkSearchPath(const char *path) { VALID_OR_RETURN(false); if (path && path[0]) { swift::ASTContext *ast = GetASTContext(); std::string path_str(path); bool 
add_search_path = true; for (const auto &swift_path : ast->SearchPathOpts.FrameworkSearchPaths) { if (swift_path.Path == path_str) { add_search_path = false; break; } } if (add_search_path) { ast->SearchPathOpts.FrameworkSearchPaths.push_back({path, /*isSystem=*/false}); return true; } } return false; } bool SwiftASTContext::AddClangArgument(std::string clang_arg, bool force) { if (!clang_arg.empty()) { swift::ClangImporterOptions &importer_options = GetClangImporterOptions(); bool add_hmap = true; if (!force) { for (std::string &arg : importer_options.ExtraArgs) { if (!arg.compare(clang_arg)) { add_hmap = false; break; } } } if (add_hmap) { importer_options.ExtraArgs.push_back(clang_arg); return true; } } return false; } bool SwiftASTContext::AddClangArgumentPair(const char *clang_arg_1, const char *clang_arg_2) { if (clang_arg_1 && clang_arg_2 && clang_arg_1[0] && clang_arg_2[0]) { swift::ClangImporterOptions &importer_options = GetClangImporterOptions(); bool add_hmap = true; for (ssize_t ai = 0, ae = importer_options.ExtraArgs.size() - 1; // -1 because we look at the next one too ai < ae; ++ai) { if (!importer_options.ExtraArgs[ai].compare(clang_arg_1) && !importer_options.ExtraArgs[ai + 1].compare(clang_arg_2)) { add_hmap = false; break; } } if (add_hmap) { importer_options.ExtraArgs.push_back(clang_arg_1); importer_options.ExtraArgs.push_back(clang_arg_2); return true; } } return false; } size_t SwiftASTContext::GetNumModuleSearchPaths() const { VALID_OR_RETURN(0); if (m_ast_context_ap.get()) return m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size(); return 0; } const char *SwiftASTContext::GetModuleSearchPathAtIndex(size_t idx) const { VALID_OR_RETURN(nullptr); if (m_ast_context_ap.get()) { if (idx < m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size()) return m_ast_context_ap->SearchPathOpts.ImportSearchPaths[idx].c_str(); } return NULL; } size_t SwiftASTContext::GetNumFrameworkSearchPaths() const { VALID_OR_RETURN(0); if (m_ast_context_ap.get()) return m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size(); return 0; } const char *SwiftASTContext::GetFrameworkSearchPathAtIndex(size_t idx) const { VALID_OR_RETURN(nullptr); if (m_ast_context_ap.get()) { if (idx < m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size()) return m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths[idx].Path.c_str(); } return NULL; } size_t SwiftASTContext::GetNumClangArguments() { swift::ClangImporterOptions &importer_options = GetClangImporterOptions(); return importer_options.ExtraArgs.size(); } const char *SwiftASTContext::GetClangArgumentAtIndex(size_t idx) { swift::ClangImporterOptions &importer_options = GetClangImporterOptions(); if (idx < importer_options.ExtraArgs.size()) return importer_options.ExtraArgs[idx].c_str(); return NULL; } swift::ModuleDecl * SwiftASTContext::GetCachedModule(const ConstString &module_name) { VALID_OR_RETURN(nullptr); SwiftModuleMap::const_iterator iter = m_swift_module_cache.find(module_name.GetCString()); if (iter != m_swift_module_cache.end()) return iter->second; return NULL; } swift::ModuleDecl * SwiftASTContext::CreateModule(const ConstString &module_basename, Status &error) { VALID_OR_RETURN(nullptr); if (module_basename) { swift::ModuleDecl *module = GetCachedModule(module_basename); if (module) { error.SetErrorStringWithFormat("module already exists for '%s'", module_basename.GetCString()); return NULL; } swift::ASTContext *ast = GetASTContext(); if (ast) { swift::Identifier module_id( 
          ast->getIdentifier(module_basename.GetCString()));
      module = swift::ModuleDecl::create(module_id, *ast);
      if (module) {
        m_swift_module_cache[module_basename.GetCString()] = module;
        return module;
      } else {
        error.SetErrorStringWithFormat("failed to create module for '%s'",
                                       module_basename.GetCString());
      }
    } else {
      error.SetErrorStringWithFormat("invalid swift AST (NULL)");
    }
  } else {
    error.SetErrorStringWithFormat("invalid module name (empty)");
  }
  return NULL;
}

void SwiftASTContext::CacheModule(swift::ModuleDecl *module) {
  VALID_OR_RETURN_VOID();

  if (!module)
    return;
  auto ID = module->getName().get();
  if (nullptr == ID || 0 == ID[0])
    return;
  if (m_swift_module_cache.find(ID) != m_swift_module_cache.end())
    return;
  m_swift_module_cache.insert({ID, module});
}

swift::ModuleDecl *
SwiftASTContext::GetModule(const ConstString &module_basename, Status &error) {
  VALID_OR_RETURN(nullptr);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
  if (log)
    log->Printf("((SwiftASTContext*)%p)->GetModule('%s')", this,
                module_basename.AsCString("<no name>"));

  if (module_basename) {
    swift::ModuleDecl *module = GetCachedModule(module_basename);
    if (module)
      return module;
    if (swift::ASTContext *ast = GetASTContext()) {
      typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec;
      llvm::StringRef module_basename_sref(module_basename.GetCString());
      ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref),
                               swift::SourceLoc());

      if (HasFatalErrors()) {
        error.SetErrorStringWithFormat("failed to get module '%s' from AST "
                                       "context:\nAST context is in a fatal "
                                       "error state",
                                       module_basename.GetCString());
        printf("error in SwiftASTContext::GetModule(%s): AST context is in a "
               "fatal error state\n",
               module_basename.GetCString());
        return nullptr;
      }

      ClearDiagnostics();

      module = ast->getModuleByName(module_basename_sref);

      if (HasErrors()) {
        DiagnosticManager diagnostic_manager;
        PrintDiagnostics(diagnostic_manager);
        error.SetErrorStringWithFormat(
            "failed to get module '%s' from AST context:\n%s",
            module_basename.GetCString(),
            diagnostic_manager.GetString().data());
#ifdef LLDB_CONFIGURATION_DEBUG
        printf("error in SwiftASTContext::GetModule(%s): '%s'\n",
               module_basename.GetCString(),
               diagnostic_manager.GetString().data());
#endif
        if (log)
          log->Printf("((SwiftASTContext*)%p)->GetModule('%s') -- error: %s",
                      this, module_basename.GetCString(),
                      diagnostic_manager.GetString().data());
      } else if (module) {
        if (log)
          log->Printf("((SwiftASTContext*)%p)->GetModule('%s') -- found %s",
                      this, module_basename.GetCString(),
                      module->getName().str().str().c_str());
        m_swift_module_cache[module_basename.GetCString()] = module;
        return module;
      } else {
        if (log)
          log->Printf(
              "((SwiftASTContext*)%p)->GetModule('%s') -- failed with no "
              "error",
              this, module_basename.GetCString());
        error.SetErrorStringWithFormat(
            "failed to get module '%s' from AST context",
            module_basename.GetCString());
      }
    } else {
      if (log)
        log->Printf(
            "((SwiftASTContext*)%p)->GetModule('%s') -- invalid ASTContext",
            this, module_basename.GetCString());
      error.SetErrorString("invalid swift::ASTContext");
    }
  } else {
    if (log)
      log->Printf(
          "((SwiftASTContext*)%p)->GetModule('%s') -- empty module name", this,
          module_basename.GetCString());
    error.SetErrorString("invalid module name (empty)");
  }
  return NULL;
}

swift::ModuleDecl *SwiftASTContext::GetModule(const FileSpec &module_spec,
                                              Status &error) {
  VALID_OR_RETURN(nullptr);

  ConstString module_basename(module_spec.GetFileNameStrippingExtension());

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
  if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s')", this, module_spec.GetPath().c_str()); if (module_basename) { SwiftModuleMap::const_iterator iter = m_swift_module_cache.find(module_basename.GetCString()); if (iter != m_swift_module_cache.end()) return iter->second; if (module_spec.Exists()) { swift::ASTContext *ast = GetASTContext(); if (!GetClangImporter()) { if (log) log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- no " "ClangImporter so giving up", this, module_spec.GetPath().c_str()); error.SetErrorStringWithFormat("couldn't get a ClangImporter"); return nullptr; } std::string module_directory(module_spec.GetDirectory().GetCString()); bool add_search_path = true; for (auto path : ast->SearchPathOpts.ImportSearchPaths) { if (path == module_directory) { add_search_path = false; break; } } // Add the search path if needed so we can find the module by basename if (add_search_path) ast->SearchPathOpts.ImportSearchPaths.push_back( std::move(module_directory)); typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec; llvm::StringRef module_basename_sref(module_basename.GetCString()); ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref), swift::SourceLoc()); swift::ModuleDecl *module = ast->getModule(llvm::ArrayRef<ModuleNameSpec>(name_pair)); if (module) { if (log) log->Printf( "((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- found %s", this, module_spec.GetPath().c_str(), module->getName().str().str().c_str()); m_swift_module_cache[module_basename.GetCString()] = module; return module; } else { if (log) log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- " "couldn't get from AST context", this, module_spec.GetPath().c_str()); error.SetErrorStringWithFormat( "failed to get module '%s' from AST context", module_basename.GetCString()); } } else { if (log) log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- " "doesn't exist", this, module_spec.GetPath().c_str()); error.SetErrorStringWithFormat("module '%s' doesn't exist", module_spec.GetPath().c_str()); } } else { if (log) log->Printf( "((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- no basename", this, module_spec.GetPath().c_str()); error.SetErrorStringWithFormat("no module basename in '%s'", module_spec.GetPath().c_str()); } return NULL; } swift::ModuleDecl * SwiftASTContext::FindAndLoadModule(const ConstString &module_basename, Process &process, Status &error) { VALID_OR_RETURN(nullptr); swift::ModuleDecl *swift_module = GetModule(module_basename, error); if (!swift_module) return nullptr; LoadModule(swift_module, process, error); return swift_module; } swift::ModuleDecl * SwiftASTContext::FindAndLoadModule(const FileSpec &module_spec, Process &process, Status &error) { VALID_OR_RETURN(nullptr); swift::ModuleDecl *swift_module = GetModule(module_spec, error); if (!swift_module) return nullptr; LoadModule(swift_module, process, error); return swift_module; } bool SwiftASTContext::LoadOneImage(Process &process, FileSpec &link_lib_spec, Status &error) { VALID_OR_RETURN(false); error.Clear(); PlatformSP platform_sp = process.GetTarget().GetPlatform(); if (platform_sp) return platform_sp->LoadImage(&process, FileSpec(), link_lib_spec, error) != LLDB_INVALID_IMAGE_TOKEN; else return false; } static void GetLibrarySearchPaths(std::vector<std::string> &paths, const swift::SearchPathOptions &search_path_opts) { paths.clear(); paths.resize(search_path_opts.LibrarySearchPaths.size() + 1); std::copy(search_path_opts.LibrarySearchPaths.begin(), 
            search_path_opts.LibrarySearchPaths.end(), paths.begin());
  // The resize() above already allotted the final slot; fill it in place
  // rather than appending, which would leave a stray empty entry in the
  // middle of the search list.
  paths.back() = search_path_opts.RuntimeLibraryPath;
}

void SwiftASTContext::LoadModule(swift::ModuleDecl *swift_module,
                                 Process &process, Status &error) {
  VALID_OR_RETURN_VOID();

  Status current_error;
  auto addLinkLibrary = [&](swift::LinkLibrary link_lib) {
    Status load_image_error;
    StreamString all_dlopen_errors;
    const char *library_name = link_lib.getName().data();

    if (library_name == NULL || library_name[0] == '\0') {
      error.SetErrorString("Empty library name passed to addLinkLibrary");
      return;
    }

    SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();

    if (runtime && runtime->IsInLibraryNegativeCache(library_name))
      return;

    swift::LibraryKind library_kind = link_lib.getKind();

    Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TYPES));
    if (log)
      log->Printf("\nLoading link library \"%s\" of kind: %d.", library_name,
                  library_kind);

    switch (library_kind) {
    case swift::LibraryKind::Framework: {
      // First make sure the library isn't already loaded. Since this is a
      // framework, we make sure the file name and the framework name are the
      // same, and that we are contained in FileName.framework with no other
      // intervening frameworks. We can get more restrictive if this gives
      // false positives.
      ConstString library_cstr(library_name);

      std::string framework_name(library_name);
      framework_name.append(".framework");

      // Lookup the module by file basename and make sure that basename has
      // "<basename>.framework" in the path.
      ModuleSpec module_spec;
      module_spec.GetFileSpec().GetFilename() = library_cstr;
      lldb_private::ModuleList matching_module_list;
      bool module_already_loaded = false;
      if (process.GetTarget().GetImages().FindModules(module_spec,
                                                      matching_module_list)) {
        matching_module_list.ForEach(
            [&module_already_loaded, &module_spec,
             &framework_name](const ModuleSP &module_sp) -> bool {
              module_already_loaded = module_spec.GetFileSpec().GetPath().find(
                                          framework_name) != std::string::npos;
              // Keep iterating if we didn't find the right module.
              return module_already_loaded == false;
            });
      }
      // If we already have this library loaded, don't try and load it again.
      if (module_already_loaded) {
        if (log)
          log->Printf("Skipping load of %s as it is already loaded.",
                      framework_name.c_str());
        return;
      }

      for (auto module : process.GetTarget().GetImages().Modules()) {
        FileSpec module_file = module->GetFileSpec();
        if (module_file.GetFilename() == library_cstr) {
          std::string module_path = module_file.GetPath();

          size_t framework_offset = module_path.rfind(framework_name);

          if (framework_offset != std::string::npos) {
            // The framework is already loaded, so we don't need to try to
            // load it again.
            if (log)
              log->Printf("Skipping load of %s as it is already loaded.",
                          framework_name.c_str());
            return;
          }
        }
      }

      std::string framework_path("@rpath/");
      framework_path.append(library_name);
      framework_path.append(".framework/");
      framework_path.append(library_name);
      FileSpec framework_spec(framework_path.c_str(), false);

      if (LoadOneImage(process, framework_spec, load_image_error)) {
        if (log)
          log->Printf("Found framework at: %s.", framework_path.c_str());

        return;
      } else
        all_dlopen_errors.Printf("Looking for \"%s\", error: %s\n",
                                 framework_path.c_str(),
                                 load_image_error.AsCString());

      // And then in the various framework search paths.
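      // (The candidate directories below are deduplicated via seen_paths
      // before being handed to Platform::LoadImageUsingPaths in one batch.)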
      std::unordered_set<std::string> seen_paths;
      std::vector<std::string> uniqued_paths;

      for (const auto &framework_search_dir :
           swift_module->getASTContext()
               .SearchPathOpts.FrameworkSearchPaths) {
        // The framework search dir as it comes from the AST context often has
        // duplicate entries, don't try to load along the same path twice.
        std::pair<std::unordered_set<std::string>::iterator, bool>
            insert_result = seen_paths.insert(framework_search_dir.Path);
        if (insert_result.second) {
          framework_path = framework_search_dir.Path;
          framework_path.append("/");
          framework_path.append(library_name);
          framework_path.append(".framework/");
          uniqued_paths.push_back(framework_path);
        }
      }

      uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
      PlatformSP platform_sp = process.GetTarget().GetPlatform();

      Status error;
      FileSpec library_spec(library_name, false);
      FileSpec found_path;
      if (platform_sp)
        token = platform_sp->LoadImageUsingPaths(&process, library_spec,
                                                 uniqued_paths, error,
                                                 &found_path);

      if (token != LLDB_INVALID_IMAGE_TOKEN) {
        if (log)
          log->Printf("Found framework at: %s.",
                      found_path.GetPath().c_str());

        return;
      } else {
        all_dlopen_errors.Printf("Failed to find framework for \"%s\" looking"
                                 " along paths:\n",
                                 library_name);
        for (const std::string &path : uniqued_paths)
          all_dlopen_errors.Printf("  %s\n", path.c_str());
      }

      // Maybe we were told to add a link library that exists in the system. I
      // tried just specifying Foo.framework/Foo and letting the system search
      // figure that out, but if DYLD_FRAMEWORK_FALLBACK_PATH is set
      // (e.g. in Xcode's test scheme) then these aren't found. So for now I
      // dial them in explicitly:
      std::string system_path("/System/Library/Frameworks/");
      system_path.append(library_name);
      system_path.append(".framework/");
      system_path.append(library_name);
      framework_spec.SetFile(system_path.c_str(), true);
      if (LoadOneImage(process, framework_spec, load_image_error))
        return;
      else
        all_dlopen_errors.Printf("Looking for \"%s\", error: %s\n",
                                 system_path.c_str(),
                                 load_image_error.AsCString());
    } break;
    case swift::LibraryKind::Library: {
      std::vector<std::string> search_paths;

      GetLibrarySearchPaths(search_paths,
                            swift_module->getASTContext().SearchPathOpts);

      if (LoadLibraryUsingPaths(process, library_name, search_paths, true,
                                all_dlopen_errors))
        return;
    } break;
    }

    // If we get here, we aren't going to find this image, so add it to a
    // negative cache:
    if (runtime)
      runtime->AddToLibraryNegativeCache(library_name);

    current_error.SetErrorStringWithFormat(
        "Failed to load linked library %s of module %s - errors:\n%s\n",
        library_name, swift_module->getName().str().str().c_str(),
        all_dlopen_errors.GetData());
  };

  swift_module->forAllVisibleModules(
      {}, [&](swift::ModuleDecl::ImportedModule import) {
        import.second->collectLinkLibraries(addLinkLibrary);
        return true;
      });
  error = current_error;
}

bool SwiftASTContext::LoadLibraryUsingPaths(
    Process &process, llvm::StringRef library_name,
    std::vector<std::string> &search_paths, bool check_rpath,
    StreamString &all_dlopen_errors) {
  VALID_OR_RETURN(false);

  Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TYPES));

  SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();
  if (!runtime) {
    all_dlopen_errors.PutCString(
        "Can't load Swift libraries without a language runtime.");
    return false;
  }

  if (ConstString::Equals(runtime->GetStandardLibraryBaseName(),
                          ConstString(library_name))) {
    // Never dlopen the standard library. Some binaries statically link to the
    // Swift standard library and dlopening it here will cause ObjC runtime
    // conflicts.
    // If you want to run Swift expressions you have to arrange to load the
    // Swift standard library by hand before doing so.
    if (log)
      log->Printf("Skipping swift standard library \"%s\" - we don't hand "
                  "load that one.",
                  runtime->GetStandardLibraryBaseName().AsCString());
    return true;
  }

  PlatformSP platform_sp(process.GetTarget().GetPlatform());

  std::string library_fullname;

  if (platform_sp) {
    library_fullname =
        platform_sp->GetFullNameForDylib(ConstString(library_name))
            .AsCString();
  } else // This is the old way, and we shouldn't use it except on Mac OS
  {
#ifdef __APPLE__
    library_fullname = "lib";
    library_fullname.append(library_name);
    library_fullname.append(".dylib");
#else
    return false;
#endif
  }

  ModuleSpec module_spec;
  module_spec.GetFileSpec().GetFilename().SetCString(library_fullname.c_str());
  lldb_private::ModuleList matching_module_list;

  if (process.GetTarget().GetImages().FindModules(module_spec,
                                                  matching_module_list) > 0) {
    if (log)
      log->Printf("Skipping module %s as it is already loaded.",
                  library_fullname.c_str());
    return true;
  }

  std::string library_path;
  std::unordered_set<std::string> seen_paths;
  Status load_image_error;
  std::vector<std::string> uniqued_paths;

  for (const std::string &library_search_dir : search_paths) {
    // The library search dir as it comes from the AST context often has
    // duplicate entries, so let's unique the path list before we send it
    // down to the target.
    std::pair<std::unordered_set<std::string>::iterator, bool> insert_result =
        seen_paths.insert(library_search_dir);
    if (insert_result.second)
      uniqued_paths.push_back(library_search_dir);
  }

  FileSpec library_spec(library_fullname, false);
  FileSpec found_library;
  uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
  Status error;
  if (platform_sp)
    token = platform_sp->LoadImageUsingPaths(&process, library_spec,
                                             uniqued_paths, error,
                                             &found_library);
  if (token != LLDB_INVALID_IMAGE_TOKEN) {
    if (log)
      log->Printf("Found library at: %s.", found_library.GetCString());
    return true;
  } else {
    all_dlopen_errors.Printf("Failed to find \"%s\" in paths:\n",
                             library_fullname.c_str());
    for (const std::string &search_dir : uniqued_paths)
      all_dlopen_errors.Printf("  %s\n", search_dir.c_str());
  }

  if (check_rpath) {
    // Let our RPATH help us out when finding the right library.
    library_path = "@rpath/";
    library_path += library_fullname;

    FileSpec link_lib_spec(library_path.c_str(), false);

    if (LoadOneImage(process, link_lib_spec, load_image_error)) {
      if (log)
        log->Printf("Found library using RPATH at: %s.",
                    library_path.c_str());
      return true;
    } else
      all_dlopen_errors.Printf("Failed to find \"%s\" on RPATH, error: %s\n",
                               library_fullname.c_str(),
                               load_image_error.AsCString());
  }
  return false;
}

void SwiftASTContext::LoadExtraDylibs(Process &process, Status &error) {
  VALID_OR_RETURN_VOID();

  error.Clear();
  swift::IRGenOptions &irgen_options = GetIRGenOptions();
  for (const swift::LinkLibrary &link_lib : irgen_options.LinkLibraries) {
    // We don't have to do frameworks here, they actually record their link
    // libraries properly.
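    // Only plain (non-framework) link libraries need the manual search
    // below, reusing the same search-path logic as LoadModule.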
if (link_lib.getKind() == swift::LibraryKind::Library) { const char *library_name = link_lib.getName().data(); StreamString errors; std::vector<std::string> search_paths; GetLibrarySearchPaths(search_paths, m_compiler_invocation_ap->getSearchPathOptions()); bool success = LoadLibraryUsingPaths(process, library_name, search_paths, false, errors); if (!success) { error.SetErrorString(errors.GetData()); } } } } bool SwiftASTContext::RegisterSectionModules( Module &module, std::vector<std::string> &module_names) { VALID_OR_RETURN(false); Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); swift::SerializedModuleLoader *sml = GetSerializeModuleLoader(); if (sml) { SectionList *section_list = module.GetSectionList(); if (section_list) { SectionSP section_sp( section_list->FindSectionByType(eSectionTypeSwiftModules, true)); if (section_sp) { DataExtractor section_data; if (section_sp->GetSectionData(section_data)) { llvm::StringRef section_data_ref( (const char *)section_data.GetDataStart(), section_data.GetByteSize()); llvm::SmallVector<std::string, 4> llvm_modules; if (swift::parseASTSection(sml, section_data_ref, llvm_modules)) { for (auto module_name : llvm_modules) module_names.push_back(module_name); return true; } } } else { if (m_ast_file_data_map.find(&module) != m_ast_file_data_map.end()) return true; SymbolVendor *sym_vendor = module.GetSymbolVendor(); if (sym_vendor) { // Grab all the AST blobs from the symbol vendor. auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift); if (log) log->Printf("SwiftASTContext::%s() retrieved %zu AST Data blobs " "from the symbol vendor.", __FUNCTION__, ast_file_datas.size()); // Add each of the AST blobs to the vector of AST blobs for the // module. auto &ast_vector = GetASTVectorForModule(&module); ast_vector.insert(ast_vector.end(), ast_file_datas.begin(), ast_file_datas.end()); // Retrieve the module names from the AST blobs retrieved from the // symbol vendor. size_t parse_fail_count = 0; size_t ast_number = 0; for (auto ast_file_data_sp : ast_file_datas) { // Parse the AST section info from the AST blob. ++ast_number; llvm::StringRef section_data_ref( (const char *)ast_file_data_sp->GetBytes(), ast_file_data_sp->GetByteSize()); llvm::SmallVector<std::string, 4> llvm_modules; if (swift::parseASTSection(sml, section_data_ref, llvm_modules)) { // Collect the LLVM module names referenced by the AST. for (auto module_name : llvm_modules) module_names.push_back(module_name); if (log) log->Printf("SwiftASTContext::%s() - parsed %zu llvm modules " "from Swift AST section %zu of %zu.", __FUNCTION__, llvm_modules.size(), ast_number, ast_file_datas.size()); } else { // Keep track of the fact that we failed to parse the AST // section info. if (log) log->Printf("SwiftASTContext::%s() - failed to parse AST " "section %zu of %zu.", __FUNCTION__, ast_number, ast_file_datas.size()); ++parse_fail_count; } } if (!ast_file_datas.empty() && (parse_fail_count == 0)) { // We found AST data entries and we successfully parsed all of // them. 
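              // Report success here; a parse failure above (or finding no
              // AST data at all) falls through to the 'return false' below.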
return true; } } } } } return false; } void SwiftASTContext::ValidateSectionModules( Module &module, const std::vector<std::string> &module_names) { VALID_OR_RETURN_VOID(); Status error; for (const std::string &module_name : module_names) if (!GetModule(ConstString(module_name.c_str()), error)) module.ReportWarning("unable to load swift module '%s' (%s)", module_name.c_str(), error.AsCString()); } swift::Identifier SwiftASTContext::GetIdentifier(const char *name) { VALID_OR_RETURN(swift::Identifier()); return GetASTContext()->getIdentifier(llvm::StringRef(name)); } swift::Identifier SwiftASTContext::GetIdentifier(const llvm::StringRef &name) { VALID_OR_RETURN(swift::Identifier()); return GetASTContext()->getIdentifier(name); } ConstString SwiftASTContext::GetMangledTypeName(swift::TypeBase *type_base) { VALID_OR_RETURN(ConstString()); auto iter = m_type_to_mangled_name_map.find(type_base), end = m_type_to_mangled_name_map.end(); if (iter != end) return ConstString(iter->second); swift::Type swift_type(type_base); bool has_archetypes = swift_type->hasArchetype(); if (!has_archetypes) { swift::Mangle::ASTMangler mangler(true); std::string s = mangler.mangleTypeForDebugger(swift_type, nullptr, nullptr); if (!s.empty()) { ConstString mangled_cs(s.c_str()); CacheDemangledType(mangled_cs.AsCString(), type_base); return mangled_cs; } } return ConstString(); } void SwiftASTContext::CacheDemangledType(const char *name, swift::TypeBase *found_type) { VALID_OR_RETURN_VOID(); m_type_to_mangled_name_map.insert(std::make_pair(found_type, name)); m_mangled_name_to_type_map.insert(std::make_pair(name, found_type)); } void SwiftASTContext::CacheDemangledTypeFailure(const char *name) { VALID_OR_RETURN_VOID(); m_negative_type_cache.Insert(name); } CompilerType SwiftASTContext::GetTypeFromMangledTypename(const char *mangled_typename, Status &error) { VALID_OR_RETURN(CompilerType()); if (mangled_typename && SwiftLanguageRuntime::IsSwiftMangledName(mangled_typename)) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s')", this, mangled_typename); swift::ASTContext *ast_ctx = GetASTContext(); if (!ast_ctx) { if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') " "-- null Swift AST Context", this, mangled_typename); error.SetErrorString("null Swift AST Context"); return CompilerType(); } error.Clear(); // If we were to crash doing this, remember what type caused it llvm::PrettyStackTraceFormat PST("error finding type for %s", mangled_typename); ConstString mangled_name(mangled_typename); swift::TypeBase *found_type = m_mangled_name_to_type_map.lookup(mangled_name.GetCString()); if (found_type) { if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') " "-- found in the positive cache", this, mangled_typename); return CompilerType(ast_ctx, found_type); } if (m_negative_type_cache.Lookup(mangled_name.GetCString())) { if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') " "-- found in the negative cache", this, mangled_typename); return CompilerType(); } if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') -- " "not cached, searching", this, mangled_typename); std::string swift_error; found_type = swift::ide::getTypeFromMangledSymbolname( *ast_ctx, mangled_typename, swift_error) .getPointer(); if (found_type) { CacheDemangledType(mangled_name.GetCString(), found_type); CompilerType result_type(ast_ctx, found_type); if (log) 
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') " "-- found %s", this, mangled_typename, result_type.GetTypeName().GetCString()); return result_type; } else { if (log) log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') " "-- error: %s", this, mangled_typename, swift_error.c_str()); error.SetErrorStringWithFormat("type for typename '%s' was not found", mangled_typename); CacheDemangledTypeFailure(mangled_name.GetCString()); return CompilerType(); } } error.SetErrorStringWithFormat("typename '%s' is not a valid Swift mangled " "typename, it should begin with _T", mangled_typename); return CompilerType(); } CompilerType SwiftASTContext::GetVoidFunctionType() { VALID_OR_RETURN(CompilerType()); if (!m_void_function_type) { swift::ASTContext *ast = GetASTContext(); swift::Type empty_tuple_type(swift::TupleType::getEmpty(*ast)); m_void_function_type = CompilerType( ast, swift::FunctionType::get(empty_tuple_type, empty_tuple_type)); } return m_void_function_type; } static CompilerType ValueDeclToType(swift::ValueDecl *decl, swift::ASTContext *ast) { if (decl) { switch (decl->getKind()) { case swift::DeclKind::TypeAlias: { swift::TypeAliasDecl *alias_decl = swift::cast<swift::TypeAliasDecl>(decl); if (alias_decl->hasInterfaceType()) { swift::Type swift_type = swift::NameAliasType::get( alias_decl, swift::Type(), swift::SubstitutionMap(), alias_decl->getUnderlyingTypeLoc().getType()); return CompilerType(ast, swift_type.getPointer()); } break; } case swift::DeclKind::Enum: case swift::DeclKind::Struct: case swift::DeclKind::Protocol: case swift::DeclKind::Class: { swift::NominalTypeDecl *nominal_decl = swift::cast<swift::NominalTypeDecl>(decl); if (nominal_decl->hasInterfaceType()) { swift::Type swift_type = nominal_decl->getDeclaredType(); return CompilerType(ast, swift_type.getPointer()); } } break; default: break; } } return CompilerType(); } CompilerType SwiftASTContext::FindQualifiedType(const char *qualified_name) { VALID_OR_RETURN(CompilerType()); if (qualified_name && qualified_name[0]) { const char *dot_pos = strchr(qualified_name, '.'); if (dot_pos) { ConstString module_name(qualified_name, dot_pos - qualified_name); swift::ModuleDecl *swift_module = GetCachedModule(module_name); if (swift_module) { swift::ModuleDecl::AccessPathTy access_path; llvm::SmallVector<swift::ValueDecl *, 4> decls; const char *module_type_name = dot_pos + 1; swift_module->lookupValue(access_path, GetIdentifier(module_type_name), swift::NLKind::UnqualifiedLookup, decls); for (auto decl : decls) { CompilerType type = ValueDeclToType(decl, GetASTContext()); if (type) return type; } } } } return CompilerType(); } static CompilerType DeclToType(swift::Decl *decl, swift::ASTContext *ast) { if (swift::ValueDecl *value_decl = swift::dyn_cast_or_null<swift::ValueDecl>(decl)) return ValueDeclToType(value_decl, ast); return CompilerType(); } static SwiftASTContext::TypeOrDecl DeclToTypeOrDecl(swift::ASTContext *ast, swift::Decl *decl) { if (decl) { switch (decl->getKind()) { case swift::DeclKind::Import: case swift::DeclKind::Extension: case swift::DeclKind::PatternBinding: case swift::DeclKind::TopLevelCode: case swift::DeclKind::GenericTypeParam: case swift::DeclKind::AssociatedType: case swift::DeclKind::EnumElement: case swift::DeclKind::EnumCase: case swift::DeclKind::IfConfig: case swift::DeclKind::Param: case swift::DeclKind::Module: case swift::DeclKind::MissingMember: break; case swift::DeclKind::InfixOperator: case swift::DeclKind::PrefixOperator: case 
swift::DeclKind::PostfixOperator: case swift::DeclKind::PrecedenceGroup: return decl; case swift::DeclKind::TypeAlias: { swift::TypeAliasDecl *alias_decl = swift::cast<swift::TypeAliasDecl>(decl); if (alias_decl->hasInterfaceType()) { swift::Type swift_type = swift::NameAliasType::get( alias_decl, swift::Type(), swift::SubstitutionMap(), alias_decl->getUnderlyingTypeLoc().getType()); return CompilerType(ast, swift_type.getPointer()); } } break; case swift::DeclKind::Enum: case swift::DeclKind::Struct: case swift::DeclKind::Class: case swift::DeclKind::Protocol: { swift::NominalTypeDecl *nominal_decl = swift::cast<swift::NominalTypeDecl>(decl); if (nominal_decl->hasInterfaceType()) { swift::Type swift_type = nominal_decl->getDeclaredType(); return CompilerType(ast, swift_type.getPointer()); } } break; case swift::DeclKind::Func: case swift::DeclKind::Var: return decl; case swift::DeclKind::Subscript: case swift::DeclKind::Constructor: case swift::DeclKind::Destructor: break; } } return CompilerType(); } size_t SwiftASTContext::FindContainedTypeOrDecl(llvm::StringRef name, TypeOrDecl container_type_or_decl, TypesOrDecls &results, bool append) { VALID_OR_RETURN(0); if (!append) results.clear(); size_t size_before = results.size(); CompilerType container_type = container_type_or_decl.Apply<CompilerType>( [](CompilerType type) -> CompilerType { return type; }, [this](swift::Decl *decl) -> CompilerType { return DeclToType(decl, GetASTContext()); }); if (false == name.empty() && llvm::dyn_cast_or_null<SwiftASTContext>(container_type.GetTypeSystem())) { swift::Type swift_type(GetSwiftType(container_type)); if (!swift_type) return 0; swift::CanType swift_can_type(swift_type->getCanonicalType()); swift::NominalType *nominal_type = swift_can_type->getAs<swift::NominalType>(); if (!nominal_type) return 0; swift::NominalTypeDecl *nominal_decl = nominal_type->getDecl(); llvm::ArrayRef<swift::ValueDecl *> decls = nominal_decl->lookupDirect( swift::DeclName(m_ast_context_ap->getIdentifier(name))); for (auto decl : decls) results.emplace(DeclToTypeOrDecl(GetASTContext(), decl)); } return results.size() - size_before; } CompilerType SwiftASTContext::FindType(const char *name, swift::ModuleDecl *swift_module) { VALID_OR_RETURN(CompilerType()); std::set<CompilerType> search_results; FindTypes(name, swift_module, search_results, false); if (search_results.empty()) return CompilerType(); else return *search_results.begin(); } llvm::Optional<SwiftASTContext::TypeOrDecl> SwiftASTContext::FindTypeOrDecl(const char *name, swift::ModuleDecl *swift_module) { VALID_OR_RETURN(llvm::Optional<SwiftASTContext::TypeOrDecl>()); TypesOrDecls search_results; FindTypesOrDecls(name, swift_module, search_results, false); if (search_results.empty()) return llvm::Optional<SwiftASTContext::TypeOrDecl>(); else return *search_results.begin(); } size_t SwiftASTContext::FindTypes(const char *name, swift::ModuleDecl *swift_module, std::set<CompilerType> &results, bool append) { VALID_OR_RETURN(0); if (!append) results.clear(); size_t before = results.size(); TypesOrDecls types_or_decls_results; FindTypesOrDecls(name, swift_module, types_or_decls_results); for (const auto &result : types_or_decls_results) { CompilerType type = result.Apply<CompilerType>( [](CompilerType type) -> CompilerType { return type; }, [this](swift::Decl *decl) -> CompilerType { if (swift::ValueDecl *value_decl = swift::dyn_cast_or_null<swift::ValueDecl>(decl)) { if (value_decl->hasInterfaceType()) { swift::Type swift_type = value_decl->getInterfaceType(); 
swift::MetatypeType *meta_type = swift_type->getAs<swift::MetatypeType>(); swift::ASTContext *ast = GetASTContext(); if (meta_type) return CompilerType(ast, meta_type->getInstanceType().getPointer()); else return CompilerType(ast, swift_type.getPointer()); } } return CompilerType(); }); results.emplace(type); } return results.size() - before; } size_t SwiftASTContext::FindTypesOrDecls(const char *name, swift::ModuleDecl *swift_module, TypesOrDecls &results, bool append) { VALID_OR_RETURN(0); if (!append) results.clear(); size_t before = results.size(); if (name && name[0] && swift_module) { swift::ModuleDecl::AccessPathTy access_path; llvm::SmallVector<swift::ValueDecl *, 4> value_decls; swift::Identifier identifier(GetIdentifier(name)); if (strchr(name, '.')) swift_module->lookupValue(access_path, identifier, swift::NLKind::QualifiedLookup, value_decls); else swift_module->lookupValue(access_path, identifier, swift::NLKind::UnqualifiedLookup, value_decls); if (identifier.isOperator()) { swift::OperatorDecl *op_decl = swift_module->lookupPrefixOperator(identifier); if (op_decl) results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl)); if ((op_decl = swift_module->lookupInfixOperator(identifier))) results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl)); if ((op_decl = swift_module->lookupPostfixOperator(identifier))) results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl)); } if (swift::PrecedenceGroupDecl *pg_decl = swift_module->lookupPrecedenceGroup(identifier)) results.emplace(DeclToTypeOrDecl(GetASTContext(), pg_decl)); for (auto decl : value_decls) results.emplace(DeclToTypeOrDecl(GetASTContext(), decl)); } return results.size() - before; } size_t SwiftASTContext::FindType(const char *name, std::set<CompilerType> &results, bool append) { VALID_OR_RETURN(0); if (!append) results.clear(); auto iter = m_swift_module_cache.begin(), end = m_swift_module_cache.end(); size_t count = 0; std::function<void(swift::ModuleDecl *)> lookup_func = [this, name, &results, &count](swift::ModuleDecl *module) -> void { CompilerType candidate(this->FindType(name, module)); if (candidate) { ++count; results.insert(candidate); } }; for (; iter != end; iter++) lookup_func(iter->second); if (m_scratch_module) lookup_func(m_scratch_module); return count; } CompilerType SwiftASTContext::FindFirstType(const char *name, const ConstString &module_name) { VALID_OR_RETURN(CompilerType()); if (name && name[0]) { if (module_name) { return FindType(name, GetCachedModule(module_name)); } else { std::set<CompilerType> types; FindType(name, types); if (!types.empty()) return *types.begin(); } } return CompilerType(); } CompilerType SwiftASTContext::ImportType(CompilerType &type, Status &error) { VALID_OR_RETURN(CompilerType()); if (m_ast_context_ap.get() == NULL) return CompilerType(); SwiftASTContext *swift_ast_ctx = llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem()); if (swift_ast_ctx == nullptr) { error.SetErrorString("Can't import clang type into a Swift ASTContext."); return CompilerType(); } else if (swift_ast_ctx == this) { // This is the same AST context, so the type is already imported... return type; } // For now we're going to do this all using mangled names. If we find that is // too slow, we can use the TypeBase * in the CompilerType to match this to // the version of the type we got from the mangled name in the original // swift::ASTContext. 
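  // Concretely: take the type's mangled name from its source context, then
  // resolve that same name in this context -- first through the
  // m_mangled_name_to_type_map cache, then by demangling it afresh.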
ConstString mangled_name(type.GetMangledTypeName()); if (mangled_name) { swift::TypeBase *our_type_base = m_mangled_name_to_type_map.lookup(mangled_name.GetCString()); if (our_type_base) return CompilerType(m_ast_context_ap.get(), our_type_base); else { Status error; CompilerType our_type( GetTypeFromMangledTypename(mangled_name.GetCString(), error)); if (error.Success()) return our_type; } } return CompilerType(); } swift::IRGenDebugInfoKind SwiftASTContext::GetGenerateDebugInfo() { return GetIRGenOptions().DebugInfoKind; } swift::PrintOptions SwiftASTContext::GetUserVisibleTypePrintingOptions( bool print_help_if_available) { swift::PrintOptions print_options; print_options.SynthesizeSugarOnTypes = true; print_options.VarInitializers = true; print_options.TypeDefinitions = true; print_options.PrintGetSetOnRWProperties = true; print_options.SkipImplicit = false; print_options.PreferTypeRepr = true; print_options.FunctionDefinitions = true; print_options.FullyQualifiedTypesIfAmbiguous = true; print_options.FullyQualifiedTypes = true; print_options.ExplodePatternBindingDecls = false; print_options.PrintDocumentationComments = print_options.PrintRegularClangComments = print_help_if_available; return print_options; } void SwiftASTContext::SetGenerateDebugInfo(swift::IRGenDebugInfoKind b) { GetIRGenOptions().DebugInfoKind = b; } llvm::TargetOptions *SwiftASTContext::getTargetOptions() { if (m_target_options_ap.get() == NULL) { m_target_options_ap.reset(new llvm::TargetOptions()); } return m_target_options_ap.get(); } swift::ModuleDecl *SwiftASTContext::GetScratchModule() { VALID_OR_RETURN(nullptr); if (m_scratch_module == nullptr) m_scratch_module = swift::ModuleDecl::create( GetASTContext()->getIdentifier("__lldb_scratch_module"), *GetASTContext()); return m_scratch_module; } swift::SILModule *SwiftASTContext::GetSILModule() { VALID_OR_RETURN(nullptr); if (m_sil_module_ap.get() == NULL) m_sil_module_ap = swift::SILModule::createEmptyModule(GetScratchModule(), GetSILOptions()); return m_sil_module_ap.get(); } swift::irgen::IRGenerator & SwiftASTContext::GetIRGenerator(swift::IRGenOptions &opts, swift::SILModule &module) { if (m_ir_generator_ap.get() == nullptr) { m_ir_generator_ap.reset(new swift::irgen::IRGenerator(opts, module)); } return *m_ir_generator_ap.get(); } swift::irgen::IRGenModule &SwiftASTContext::GetIRGenModule() { VALID_OR_RETURN(*m_ir_gen_module_ap); llvm::call_once(m_ir_gen_module_once, [this]() { // Make sure we have a good ClangImporter. GetClangImporter(); swift::IRGenOptions &ir_gen_opts = GetIRGenOptions(); std::string error_str; std::string triple = GetTriple(); const llvm::Target *llvm_target = llvm::TargetRegistry::lookupTarget(triple, error_str); llvm::CodeGenOpt::Level optimization_level = llvm::CodeGenOpt::Level::None; // Create a target machine. llvm::TargetMachine *target_machine = llvm_target->createTargetMachine( triple, "generic", // cpu "", // features *getTargetOptions(), llvm::Reloc::Static, // TODO verify with Sean, Default went away llvm::None, optimization_level); if (target_machine) { // Set the module's string representation. 
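      // (i.e. the data layout string and target triple, so IR emitted
      // through this IRGenModule matches the target machine created above.)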
const llvm::DataLayout data_layout = target_machine->createDataLayout(); llvm::Triple llvm_triple(triple); swift::SILModule *sil_module = GetSILModule(); if (sil_module != nullptr) { swift::irgen::IRGenerator &ir_generator = GetIRGenerator(ir_gen_opts, *sil_module); swift::PrimarySpecificPaths PSPs = GetCompilerInvocation() .getFrontendOptions() .InputsAndOutputs.getPrimarySpecificPathsForAtMostOnePrimary(); m_ir_gen_module_ap.reset(new swift::irgen::IRGenModule( ir_generator, ir_generator.createTargetMachine(), nullptr, GetGlobalLLVMContext(), ir_gen_opts.ModuleName, PSPs.OutputFilename, PSPs.MainInputFilenameForDebugInfo)); llvm::Module *llvm_module = m_ir_gen_module_ap->getModule(); llvm_module->setDataLayout(data_layout.getStringRepresentation()); llvm_module->setTargetTriple(triple); } } }); return *m_ir_gen_module_ap; } CompilerType SwiftASTContext::CreateTupleType(const std::vector<CompilerType> &elements) { VALID_OR_RETURN(CompilerType()); Status error; if (elements.size() == 0) return CompilerType(GetASTContext(), GetASTContext()->TheEmptyTupleType); else { std::vector<swift::TupleTypeElt> tuple_elems; for (const CompilerType &type : elements) { if (auto swift_type = GetSwiftType(type)) tuple_elems.push_back(swift::TupleTypeElt(swift_type)); else return CompilerType(); } llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems); return CompilerType( GetASTContext(), swift::TupleType::get(fields, *GetASTContext()).getPointer()); } } CompilerType SwiftASTContext::CreateTupleType(const std::vector<TupleElement> &elements) { VALID_OR_RETURN(CompilerType()); Status error; if (elements.size() == 0) return CompilerType(GetASTContext(), GetASTContext()->TheEmptyTupleType); else { std::vector<swift::TupleTypeElt> tuple_elems; for (const TupleElement &element : elements) { if (auto swift_type = GetSwiftType(element.element_type)) { if (element.element_name.IsEmpty()) tuple_elems.push_back(swift::TupleTypeElt(swift_type)); else tuple_elems.push_back(swift::TupleTypeElt( swift_type, m_ast_context_ap->getIdentifier( element.element_name.GetCString()))); } else return CompilerType(); } llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems); return CompilerType( GetASTContext(), swift::TupleType::get(fields, *GetASTContext()).getPointer()); } } CompilerType SwiftASTContext::CreateFunctionType(CompilerType arg_type, CompilerType ret_type, bool throws) { VALID_OR_RETURN(CompilerType()); if (!llvm::dyn_cast_or_null<SwiftASTContext>(arg_type.GetTypeSystem()) || !llvm::dyn_cast_or_null<SwiftASTContext>(ret_type.GetTypeSystem())) return CompilerType(); swift::FunctionType::ExtInfo ext_info; if (throws) ext_info = ext_info.withThrows(); return CompilerType(GetASTContext(), swift::FunctionType::get( GetSwiftType(arg_type), GetSwiftType(ret_type), ext_info)); } CompilerType SwiftASTContext::GetErrorType() { VALID_OR_RETURN(CompilerType()); swift::ASTContext *swift_ctx = GetASTContext(); if (swift_ctx) { // Getting the error type requires the Stdlib module be loaded, but doesn't // cause it to be loaded. 
// Do that here: swift_ctx->getStdlibModule(true); swift::NominalTypeDecl *error_type_decl = GetASTContext()->getErrorDecl(); if (error_type_decl) { auto error_type = error_type_decl->getDeclaredType().getPointer(); return CompilerType(GetASTContext(), error_type); } } return CompilerType(); } CompilerType SwiftASTContext::GetNSErrorType(Status &error) { VALID_OR_RETURN(CompilerType()); return GetTypeFromMangledTypename(SwiftLanguageRuntime::GetCurrentMangledName("_TtC10Foundation7NSError").c_str(), error); } CompilerType SwiftASTContext::CreateMetatypeType(CompilerType instance_type) { VALID_OR_RETURN(CompilerType()); if (llvm::dyn_cast_or_null<SwiftASTContext>(instance_type.GetTypeSystem())) return CompilerType(GetASTContext(), swift::MetatypeType::get(GetSwiftType(instance_type), *GetASTContext())); return CompilerType(); } SwiftASTContext *SwiftASTContext::GetSwiftASTContext(swift::ASTContext *ast) { SwiftASTContext *swift_ast = GetASTMap().Lookup(ast); return swift_ast; } uint32_t SwiftASTContext::GetPointerByteSize() { VALID_OR_RETURN(0); if (m_pointer_byte_size == 0) { swift::ASTContext *ast = GetASTContext(); m_pointer_byte_size = CompilerType(ast, ast->TheRawPointerType.getPointer()) .GetByteSize(nullptr); } return m_pointer_byte_size; } uint32_t SwiftASTContext::GetPointerBitAlignment() { VALID_OR_RETURN(0); if (m_pointer_bit_align == 0) { swift::ASTContext *ast = GetASTContext(); m_pointer_bit_align = CompilerType(ast, ast->TheRawPointerType.getPointer()) .GetAlignedBitSize(); } return m_pointer_bit_align; } bool SwiftASTContext::HasErrors() { if (m_diagnostic_consumer_ap.get()) return ( static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get()) ->NumErrors() != 0); else return false; } bool SwiftASTContext::HasFatalErrors(swift::ASTContext *ast_context) { return (ast_context && ast_context->Diags.hasFatalErrorOccurred()); } void SwiftASTContext::ClearDiagnostics() { assert(!HasFatalErrors() && "Never clear a fatal diagnostic!"); if (m_diagnostic_consumer_ap.get()) static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get()) ->Clear(); } bool SwiftASTContext::SetColorizeDiagnostics(bool b) { if (m_diagnostic_consumer_ap.get()) return static_cast<StoringDiagnosticConsumer *>( m_diagnostic_consumer_ap.get()) ->SetColorize(b); return false; } void SwiftASTContext::PrintDiagnostics(DiagnosticManager &diagnostic_manager, uint32_t bufferID, uint32_t first_line, uint32_t last_line, uint32_t line_offset) { // If this is a fatal error, copy the error into the AST context's fatal error // field, and then put it to the stream, otherwise just dump the diagnostics // to the stream. // N.B. you cannot use VALID_OR_RETURN_VOID here since that exits if you have // fatal errors, which are what we are trying to print here. 
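  // Note also that fatal errors are latched into m_fatal_errors the first
  // time they are seen; m_reported_fatal_error below guards against latching
  // them twice.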
  if (!m_ast_context_ap.get()) {
    SymbolFile *sym_file = GetSymbolFile();
    if (sym_file) {
      ConstString name =
          sym_file->GetObjectFile()->GetModule()->GetObjectName();
      m_fatal_errors.SetErrorStringWithFormat("Null context for %s.",
                                              name.AsCString());
    } else {
      m_fatal_errors.SetErrorString("Unknown fatal error occurred.");
    }
    return;
  }

  if (m_ast_context_ap->Diags.hasFatalErrorOccurred() &&
      !m_reported_fatal_error) {
    DiagnosticManager fatal_diagnostics;

    if (m_diagnostic_consumer_ap.get())
      static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
          ->PrintDiagnostics(fatal_diagnostics, bufferID, first_line,
                             last_line, line_offset);
    if (fatal_diagnostics.Diagnostics().size())
      m_fatal_errors.SetErrorString(fatal_diagnostics.GetString().data());
    else
      m_fatal_errors.SetErrorString("Unknown fatal error occurred.");

    m_reported_fatal_error = true;

    for (const DiagnosticList::value_type &fatal_diagnostic :
         fatal_diagnostics.Diagnostics()) {
      // FIXME: need to add a CopyDiagnostic operation for copying diagnostics
      // from one manager to another.
      diagnostic_manager.AddDiagnostic(
          fatal_diagnostic->GetMessage(), fatal_diagnostic->GetSeverity(),
          fatal_diagnostic->getKind(), fatal_diagnostic->GetCompilerID());
    }
  } else {
    if (m_diagnostic_consumer_ap.get())
      static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
          ->PrintDiagnostics(diagnostic_manager, bufferID, first_line,
                             last_line, line_offset);
  }
}

void SwiftASTContext::ModulesDidLoad(ModuleList &module_list) {
  ClearModuleDependentCaches();
}

void SwiftASTContext::ClearModuleDependentCaches() {
  m_negative_type_cache.Clear();
  m_extra_type_info_cache.Clear();
}

void SwiftASTContext::DumpConfiguration(Log *log) {
  VALID_OR_RETURN_VOID();

  if (!log)
    return;

  log->Printf("(SwiftASTContext*)%p:", this);

  if (!m_ast_context_ap) {
    // Without an AST context there is no configuration to dump; bail out
    // before dereferencing it below.
    log->Printf(" (no AST context)");
    return;
  }

  log->Printf(" Architecture : %s",
              m_ast_context_ap->LangOpts.Target.getTriple().c_str());
  log->Printf(" SDK path : %s",
              m_ast_context_ap->SearchPathOpts.SDKPath.c_str());
  log->Printf(" Runtime resource path : %s",
              m_ast_context_ap->SearchPathOpts.RuntimeResourcePath.c_str());
  log->Printf(" Runtime library path : %s",
              m_ast_context_ap->SearchPathOpts.RuntimeLibraryPath.c_str());
  log->Printf(
      " Runtime library import path : %s",
      m_ast_context_ap->SearchPathOpts.RuntimeLibraryImportPath.c_str());

  log->Printf(" Framework search paths : (%llu items)",
              (unsigned long long)
                  m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size());
  for (const auto &framework_search_path :
       m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths) {
    log->Printf(" %s", framework_search_path.Path.c_str());
  }

  log->Printf(" Import search paths : (%llu items)",
              (unsigned long long)
                  m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size());
  for (std::string &import_search_path :
       m_ast_context_ap->SearchPathOpts.ImportSearchPaths) {
    log->Printf(" %s", import_search_path.c_str());
  }

  swift::ClangImporterOptions &clang_importer_options =
      GetClangImporterOptions();

  log->Printf(" Extra clang arguments : (%llu items)",
              (unsigned long long)clang_importer_options.ExtraArgs.size());
  for (std::string &extra_arg : clang_importer_options.ExtraArgs) {
    log->Printf(" %s", extra_arg.c_str());
  }
}

bool SwiftASTContext::HasTarget() const {
  lldb::TargetWP empty_wp;

  // If either call to std::weak_ptr::owner_before(...) returns true, this
  // indicates that m_target_wp once contained (and possibly still does) a
  // reference to a valid shared pointer. This helps us know if we had a valid
  // reference to a target which is now invalid because the target was deleted.
  return empty_wp.owner_before(m_target_wp) ||
         m_target_wp.owner_before(empty_wp);
}

bool SwiftASTContext::CheckProcessChanged() {
  if (HasTarget()) {
    TargetSP target_sp(m_target_wp.lock());
    if (target_sp) {
      Process *process = target_sp->GetProcessSP().get();
      if (m_process == nullptr) {
        if (process)
          m_process = process;
      } else {
        if (m_process != process)
          return true;
      }
    }
  }
  return false;
}

void SwiftASTContext::AddDebuggerClient(
    swift::DebuggerClient *debugger_client) {
  m_debugger_clients.push_back(
      std::unique_ptr<swift::DebuggerClient>(debugger_client));
}

SwiftASTContext::ExtraTypeInformation::ExtraTypeInformation()
    : m_flags(false, false) {}

SwiftASTContext::ExtraTypeInformation::ExtraTypeInformation(
    swift::CanType swift_can_type)
    : m_flags(false, false) {
  static ConstString g_rawValue("rawValue");

  swift::ASTContext &ast_ctx = swift_can_type->getASTContext();
  SwiftASTContext *swift_ast = SwiftASTContext::GetSwiftASTContext(&ast_ctx);
  if (swift_ast) {
    swift::ProtocolDecl *option_set =
        ast_ctx.getProtocol(swift::KnownProtocolKind::OptionSet);
    if (option_set) {
      if (auto nominal_decl =
              swift_can_type.getNominalOrBoundGenericNominal()) {
        for (swift::ProtocolDecl *protocol_decl :
             nominal_decl->getAllProtocols()) {
          if (protocol_decl == option_set) {
            for (swift::VarDecl *stored_property :
                 nominal_decl->getStoredProperties()) {
              swift::Identifier name = stored_property->getName();
              if (name.str() == g_rawValue.GetStringRef()) {
                m_flags.m_is_trivial_option_set = true;
                break;
              }
            }
          }
        }
      }
    }
  }

  if (auto metatype_type =
          swift::dyn_cast_or_null<swift::MetatypeType>(swift_can_type)) {
    if (!metatype_type->hasRepresentation() ||
        (swift::MetatypeRepresentation::Thin ==
         metatype_type->getRepresentation()))
      m_flags.m_is_zero_size = true;
  } else if (auto enum_decl = swift_can_type->getEnumOrBoundGenericEnum()) {
    size_t num_nopayload = 0, num_payload = 0;
    for (auto the_case : enum_decl->getAllElements()) {
      if (the_case->getArgumentInterfaceType()) {
        num_payload = 1;
        break;
      } else {
        if (++num_nopayload > 1)
          break;
      }
    }
    if (num_nopayload == 1 && num_payload == 0)
      m_flags.m_is_zero_size = true;
  } else if (auto struct_decl =
                 swift_can_type->getStructOrBoundGenericStruct()) {
    bool has_storage = false;
    auto members = struct_decl->getMembers();
    for (const auto &member : members) {
      if (swift::VarDecl *var_decl =
              swift::dyn_cast<swift::VarDecl>(member)) {
        if (!var_decl->isStatic() && var_decl->hasStorage()) {
          has_storage = true;
          break;
        }
      }
    }
    m_flags.m_is_zero_size = !has_storage;
  } else if (auto tuple_type =
                 swift::dyn_cast_or_null<swift::TupleType>(swift_can_type)) {
    m_flags.m_is_zero_size = (tuple_type->getNumElements() == 0);
  }
}

SwiftASTContext::ExtraTypeInformation
SwiftASTContext::GetExtraTypeInformation(void *type) {
  if (!type)
    return ExtraTypeInformation();

  swift::CanType swift_can_type;
  void *swift_can_type_ptr = nullptr;
  if (auto swift_type = GetSwiftType(type)) {
    swift_can_type = swift_type->getCanonicalType();
    swift_can_type_ptr = swift_can_type.getPointer();
  }

  if (!swift_can_type_ptr)
    return ExtraTypeInformation();

  ExtraTypeInformation eti;
  if (!m_extra_type_info_cache.Lookup(swift_can_type_ptr, eti)) {
    ExtraTypeInformation extra_info(swift_can_type);
    m_extra_type_info_cache.Insert(swift_can_type_ptr, extra_info);
    return extra_info;
  } else {
    return eti;
  }
}

bool SwiftASTContext::DeclContextIsStructUnionOrClass(void *opaque_decl_ctx) {
  return false;
}

ConstString
SwiftASTContext::DeclContextGetName(void *opaque_decl_ctx) {
  return ConstString();
}

ConstString
SwiftASTContext::DeclContextGetScopeQualifiedName(void *opaque_decl_ctx) {
  return ConstString();
}

bool SwiftASTContext::DeclContextIsClassMethod(
    void *opaque_decl_ctx, lldb::LanguageType *language_ptr,
    bool *is_instance_method_ptr, ConstString *language_object_name_ptr) {
  return false;
}

///////////
////////////////////
///////////

bool SwiftASTContext::IsArrayType(void *type, CompilerType *element_type_ptr,
                                  uint64_t *size, bool *is_incomplete) {
  VALID_OR_RETURN(false);

  swift::CanType swift_can_type(GetCanonicalSwiftType(type));
  swift::BoundGenericStructType *struct_type =
      swift_can_type->getAs<swift::BoundGenericStructType>();
  if (struct_type) {
    swift::StructDecl *struct_decl = struct_type->getDecl();
    if (strcmp(struct_decl->getName().get(), "Array") != 0)
      return false;
    if (!struct_decl->getModuleContext()->isStdlibModule())
      return false;
    const llvm::ArrayRef<swift::Type> &args = struct_type->getGenericArgs();
    if (args.size() != 1)
      return false;
    if (is_incomplete)
      *is_incomplete = true;
    if (size)
      *size = 0;
    if (element_type_ptr)
      *element_type_ptr = CompilerType(GetASTContext(), args[0].getPointer());
    return true;
  }

  return false;
}

bool SwiftASTContext::IsAggregateType(void *type) {
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto referent_type = swift_can_type->getReferenceStorageReferent();
    return (referent_type->is<swift::TupleType>() ||
            referent_type->is<swift::BuiltinVectorType>() ||
            referent_type->getAnyNominal());
  }

  return false;
}

bool SwiftASTContext::IsVectorType(void *type, CompilerType *element_type,
                                   uint64_t *size) {
  return false;
}

bool SwiftASTContext::IsRuntimeGeneratedType(void *type) { return false; }

bool SwiftASTContext::IsCharType(void *type) { return false; }

bool SwiftASTContext::IsCompleteType(void *type) { return true; }

bool SwiftASTContext::IsConst(void *type) { return false; }

bool SwiftASTContext::IsCStringType(void *type, uint32_t &length) {
  return false;
}

bool SwiftASTContext::IsFunctionType(void *type, bool *is_variadic_ptr) {
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    const swift::TypeKind type_kind = swift_can_type->getKind();
    switch (type_kind) {
    case swift::TypeKind::Function:
    case swift::TypeKind::GenericFunction:
      return true;
    case swift::TypeKind::SILFunction:
      return false; // TODO: is this correct?
    default:
      return false;
    }
  }
  return false;
}

// Used to detect "Homogeneous Floating-point Aggregates"
uint32_t SwiftASTContext::IsHomogeneousAggregate(void *type,
                                                 CompilerType *base_type_ptr) {
  return 0;
}

size_t SwiftASTContext::GetNumberOfFunctionArguments(void *type) {
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto func =
        swift::dyn_cast_or_null<swift::AnyFunctionType>(swift_can_type);
    if (func) {
      auto input = func.getInput();
      // See comment in swift::AnyFunctionType for rationale here:
      // A function can take either a tuple or a ParenType, but if a ParenType
      // (i.e. (Foo)), then it will be reduced down to just Foo, so if the
      // input is not a tuple, that must mean there is only 1 input.
      auto tuple = swift::dyn_cast<swift::TupleType>(input);
      if (tuple)
        return tuple->getNumElements();
      else
        return 1;
    }
  }
  return 0;
}

CompilerType SwiftASTContext::GetFunctionArgumentAtIndex(void *type,
                                                         const size_t index) {
  VALID_OR_RETURN(CompilerType());

  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto func = swift::dyn_cast<swift::AnyFunctionType>(swift_can_type);
    if (func) {
      auto input = func.getInput();
      // See comment in swift::AnyFunctionType for rationale here:
      // A function can take either a tuple or a ParenType, but if a ParenType
      // (i.e. (Foo)), then it will be reduced down to just Foo, so if the
      // input is not a tuple, that must mean there is only 1 input.
      auto tuple = swift::dyn_cast<swift::TupleType>(input);
      if (tuple) {
        if (index < tuple->getNumElements())
          return CompilerType(GetASTContext(), tuple->getElementType(index));
      } else
        return CompilerType(GetASTContext(), input);
    }
  }
  return CompilerType();
}

bool SwiftASTContext::IsFunctionPointerType(void *type) {
  return IsFunctionType(type, nullptr); // FIXME: think about this
}

bool SwiftASTContext::IsBlockPointerType(
    void *type, CompilerType *function_pointer_type_ptr) {
  return false;
}

bool SwiftASTContext::IsIntegerType(void *type, bool &is_signed) {
  return (GetTypeInfo(type, nullptr) & eTypeIsInteger);
}

bool SwiftASTContext::IsPointerType(void *type, CompilerType *pointee_type) {
  VALID_OR_RETURN(false);

  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto referent_type = swift_can_type->getReferenceStorageReferent();
    return (referent_type->is<swift::BuiltinRawPointerType>() ||
            referent_type->is<swift::BuiltinNativeObjectType>() ||
            referent_type->is<swift::BuiltinUnsafeValueBufferType>() ||
            referent_type->is<swift::BuiltinUnknownObjectType>() ||
            referent_type->is<swift::BuiltinBridgeObjectType>());
  }

  if (pointee_type)
    pointee_type->Clear();
  return false;
}

bool SwiftASTContext::IsPointerOrReferenceType(void *type,
                                               CompilerType *pointee_type) {
  return IsPointerType(type, pointee_type) ||
         IsReferenceType(type, pointee_type, nullptr);
}

bool SwiftASTContext::ShouldTreatScalarValueAsAddress(
    lldb::opaque_compiler_type_t type) {
  return Flags(GetTypeInfo(type, nullptr))
      .AnySet(eTypeInstanceIsPointer | eTypeIsReference);
}

bool SwiftASTContext::IsReferenceType(void *type, CompilerType *pointee_type,
                                      bool *is_rvalue) {
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    const swift::TypeKind type_kind = swift_can_type->getKind();
    switch (type_kind) {
    case swift::TypeKind::InOut:
    case swift::TypeKind::LValue:
      if (pointee_type)
        *pointee_type = GetNonReferenceType(type);
      return true;
    default:
      break;
    }
  }
  if (pointee_type)
    pointee_type->Clear();
  return false;
}

bool SwiftASTContext::IsFloatingPointType(void *type, uint32_t &count,
                                          bool &is_complex) {
  if (type) {
    if (GetTypeInfo(type, nullptr) & eTypeIsFloat) {
      count = 1;
      is_complex = false;
      return true;
    }
  }
  count = 0;
  is_complex = false;
  return false;
}

bool SwiftASTContext::IsDefined(void *type) {
  if (!type)
    return false;
  return true;
}

bool SwiftASTContext::IsPolymorphicClass(void *type) { return false; }

bool SwiftASTContext::IsPossibleDynamicType(void *type,
                                            CompilerType *dynamic_pointee_type,
                                            bool check_cplusplus,
                                            bool check_objc,
                                            bool check_swift) {
  VALID_OR_RETURN(false);

  if (type && check_swift) {
    // FIXME: use the dynamic_pointee_type
    Flags type_flags(GetTypeInfo(type, nullptr));

    if (type_flags.AnySet(eTypeIsArchetype | eTypeIsClass | eTypeIsProtocol))
      return true;

    if (type_flags.AnySet(eTypeIsStructUnion |
eTypeIsEnumeration | eTypeIsTuple)) { CompilerType compiler_type(GetASTContext(), GetCanonicalSwiftType(type)); return !SwiftASTContext::IsFullyRealized(compiler_type); } auto can_type = GetCanonicalSwiftType(type).getPointer(); if (can_type == GetASTContext()->TheRawPointerType.getPointer()) return true; if (can_type == GetASTContext()->TheUnknownObjectType.getPointer()) return true; if (can_type == GetASTContext()->TheNativeObjectType.getPointer()) return true; if (can_type == GetASTContext()->TheBridgeObjectType.getPointer()) return true; } if (dynamic_pointee_type) dynamic_pointee_type->Clear(); return false; } bool SwiftASTContext::IsScalarType(void *type) { if (!type) return false; return (GetTypeInfo(type, nullptr) & eTypeIsScalar) != 0; } bool SwiftASTContext::IsTypedefType(void *type) { if (!type) return false; swift::Type swift_type(GetSwiftType(type)); return swift::isa<swift::NameAliasType>(swift_type.getPointer()); } bool SwiftASTContext::IsVoidType(void *type) { VALID_OR_RETURN(false); if (!type) return false; return type == GetASTContext()->TheEmptyTupleType.getPointer(); } bool SwiftASTContext::IsArchetypeType(const CompilerType &compiler_type) { if (!compiler_type.IsValid()) return false; if (llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) { swift::Type swift_type(GetSwiftType(compiler_type)); return swift_type->is<swift::ArchetypeType>(); } return false; } bool SwiftASTContext::IsSelfArchetypeType(const CompilerType &compiler_type) { if (!compiler_type.IsValid()) return false; if (llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) { if (swift::isa<swift::ArchetypeType>( (swift::TypeBase *)compiler_type.GetOpaqueQualType())) { // Hack: Just assume if we have an archetype as the type of 'self', // it's going to be a protocol 'Self' type. 
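      // A more precise check would need the enclosing DeclContext, which is
      // not recoverable from the type alone.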
return true; } } return false; } bool SwiftASTContext::IsPossibleZeroSizeType( const CompilerType &compiler_type) { if (!compiler_type.IsValid()) return false; if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>( compiler_type.GetTypeSystem())) return ast ->GetExtraTypeInformation( GetCanonicalSwiftType(compiler_type).getPointer()) .m_flags.m_is_zero_size; return false; } bool SwiftASTContext::IsErrorType(const CompilerType &compiler_type) { if (compiler_type.IsValid() && llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) { ProtocolInfo protocol_info; if (GetProtocolTypeInfo(compiler_type, protocol_info)) return protocol_info.m_is_errortype; return false; } return false; } CompilerType SwiftASTContext::GetReferentType(const CompilerType &compiler_type) { VALID_OR_RETURN(CompilerType()); if (compiler_type.IsValid() && llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) { swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type)); swift::TypeBase *swift_type = swift_can_type.getPointer(); if (swift_type && llvm::isa<swift::WeakStorageType>(swift_type)) return compiler_type; auto ref_type = swift_can_type->getReferenceStorageReferent(); return CompilerType(GetASTContext(), ref_type); } return CompilerType(); } bool SwiftASTContext::IsTrivialOptionSetType( const CompilerType &compiler_type) { if (compiler_type.IsValid() && llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) return GetExtraTypeInformation(compiler_type.GetOpaqueQualType()) .m_flags.m_is_trivial_option_set; return false; } bool SwiftASTContext::IsFullyRealized(const CompilerType &compiler_type) { if (!compiler_type.IsValid()) return false; if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>( compiler_type.GetTypeSystem())) { swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type)); if (swift::isa<swift::MetatypeType>(swift_can_type)) return true; return !swift_can_type->hasArchetype() && !swift_can_type->hasTypeParameter(); } return false; } bool SwiftASTContext::GetProtocolTypeInfo(const CompilerType &type, ProtocolInfo &protocol_info) { if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (!swift_can_type.isExistentialType()) return false; swift::ExistentialLayout layout = swift_can_type.getExistentialLayout(); protocol_info.m_is_class_only = layout.requiresClass(); protocol_info.m_num_protocols = layout.getProtocols().size(); protocol_info.m_is_objc = layout.isObjC(); protocol_info.m_is_anyobject = layout.isAnyObject(); protocol_info.m_is_errortype = layout.isErrorExistential(); if (layout.superclass) { protocol_info.m_superclass = CompilerType(ast->GetASTContext(), layout.superclass.getPointer()); } unsigned num_witness_tables = 0; for (auto protoTy : layout.getProtocols()) { if (!protoTy->getDecl()->isObjC()) num_witness_tables++; } if (layout.isErrorExistential()) { // Error existential -- instance pointer only protocol_info.m_num_payload_words = 0; protocol_info.m_num_storage_words = 1; } else if (layout.requiresClass()) { // Class-constrained existential -- instance pointer plus witness tables protocol_info.m_num_payload_words = 0; protocol_info.m_num_storage_words = 1 + num_witness_tables; } else { // Opaque existential -- three words of inline storage, metadata and // witness tables protocol_info.m_num_payload_words = swift::NumWords_ValueBuffer; protocol_info.m_num_storage_words = swift::NumWords_ValueBuffer + 1 + num_witness_tables; } return 
true; } return false; } SwiftASTContext::TypeAllocationStrategy SwiftASTContext::GetAllocationStrategy(const CompilerType &type) { if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { const swift::irgen::TypeInfo *type_info = ast->GetSwiftTypeInfo(type.GetOpaqueQualType()); if (!type_info) return TypeAllocationStrategy::eUnknown; switch (type_info->getFixedPacking(ast->GetIRGenModule())) { case swift::irgen::FixedPacking::OffsetZero: return TypeAllocationStrategy::eInline; case swift::irgen::FixedPacking::Allocate: return TypeAllocationStrategy::ePointer; case swift::irgen::FixedPacking::Dynamic: return TypeAllocationStrategy::eDynamic; default: break; } } return TypeAllocationStrategy::eUnknown; } bool SwiftASTContext::IsBeingDefined(void *type) { return false; } bool SwiftASTContext::IsObjCObjectPointerType(const CompilerType &type, CompilerType *class_type_ptr) { if (!type) return false; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); if (type_kind == swift::TypeKind::BuiltinNativeObject || type_kind == swift::TypeKind::BuiltinUnknownObject) return true; if (class_type_ptr) class_type_ptr->Clear(); return false; } //---------------------------------------------------------------------- // Type Completion //---------------------------------------------------------------------- bool SwiftASTContext::GetCompleteType(void *type) { return true; } ConstString SwiftASTContext::GetTypeName(void *type) { std::string type_name; if (type) { swift::Type swift_type(GetSwiftType(type)); swift::Type normalized_type = swift_type.transform([](swift::Type type) -> swift::Type { if (swift::SyntaxSugarType *syntax_sugar_type = swift::dyn_cast<swift::SyntaxSugarType>(type.getPointer())) { return syntax_sugar_type->getSinglyDesugaredType(); } if (swift::DictionaryType *dictionary_type = swift::dyn_cast<swift::DictionaryType>(type.getPointer())) { return dictionary_type->getSinglyDesugaredType(); } return type; }); swift::PrintOptions print_options; print_options.FullyQualifiedTypes = true; print_options.SynthesizeSugarOnTypes = false; type_name = normalized_type.getString(print_options); } return ConstString(type_name); } ConstString SwiftASTContext::GetDisplayTypeName(void *type) { std::string type_name(GetTypeName(type).AsCString("")); if (type) { swift::Type swift_type(GetSwiftType(type)); swift::PrintOptions print_options; print_options.FullyQualifiedTypes = false; print_options.SynthesizeSugarOnTypes = true; print_options.FullyQualifiedTypesIfAmbiguous = true; type_name = swift_type.getString(print_options); } return ConstString(type_name); } ConstString SwiftASTContext::GetTypeSymbolName(void *type) { swift::Type swift_type(GetSwiftType(type)); return GetTypeName(swift_type->getWithoutParens().getPointer()); } ConstString SwiftASTContext::GetMangledTypeName(void *type) { return GetMangledTypeName(GetSwiftType(type).getPointer()); } uint32_t SwiftASTContext::GetTypeInfo(void *type, CompilerType *pointee_or_element_clang_type) { VALID_OR_RETURN(0); if (!type) return 0; if (pointee_or_element_clang_type) pointee_or_element_clang_type->Clear(); swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); uint32_t swift_flags = eTypeIsSwift; switch (type_kind) { case swift::TypeKind::DependentMember: case swift::TypeKind::Error: case swift::TypeKind::GenericTypeParam: case swift::TypeKind::Module: case swift::TypeKind::TypeVariable: break; case 
swift::TypeKind::UnboundGeneric: swift_flags |= eTypeIsGeneric; break; case swift::TypeKind::GenericFunction: swift_flags |= eTypeIsGeneric; case swift::TypeKind::Function: swift_flags |= eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeInstanceIsPointer; break; case swift::TypeKind::BuiltinInteger: swift_flags |= eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsInteger; break; case swift::TypeKind::BuiltinFloat: swift_flags |= eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsFloat; break; case swift::TypeKind::BuiltinRawPointer: swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer | eTypeIsScalar | eTypeHasValue; break; case swift::TypeKind::BuiltinNativeObject: swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer | eTypeIsScalar | eTypeHasValue; break; case swift::TypeKind::BuiltinUnknownObject: swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer | eTypeIsScalar | eTypeHasValue | eTypeIsObjC; break; case swift::TypeKind::BuiltinBridgeObject: swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer | eTypeIsScalar | eTypeHasValue | eTypeIsObjC; break; case swift::TypeKind::BuiltinUnsafeValueBuffer: swift_flags |= eTypeIsBuiltIn | eTypeIsPointer | eTypeIsScalar | eTypeHasValue; break; case swift::TypeKind::BuiltinVector: // TODO: OR in eTypeIsFloat or eTypeIsInteger as needed return eTypeIsBuiltIn | eTypeHasChildren | eTypeIsVector; break; case swift::TypeKind::Tuple: swift_flags |= eTypeHasChildren | eTypeIsTuple; break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: swift_flags |= CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetTypeInfo(pointee_or_element_clang_type); break; case swift::TypeKind::BoundGenericEnum: swift_flags |= eTypeIsGeneric | eTypeIsBound; case swift::TypeKind::Enum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info) { if (cached_enum_info->GetNumElementsWithPayload() == 0) swift_flags |= eTypeHasValue | eTypeIsEnumeration; else swift_flags |= eTypeHasValue | eTypeIsEnumeration | eTypeHasChildren; } else swift_flags |= eTypeIsEnumeration; } break; case swift::TypeKind::BoundGenericStruct: swift_flags |= eTypeIsGeneric | eTypeIsBound; case swift::TypeKind::Struct: swift_flags |= eTypeHasChildren | eTypeIsStructUnion; break; case swift::TypeKind::BoundGenericClass: swift_flags |= eTypeIsGeneric | eTypeIsBound; case swift::TypeKind::Class: swift_flags |= eTypeHasChildren | eTypeIsClass | eTypeHasValue | eTypeInstanceIsPointer; break; case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: swift_flags |= eTypeHasChildren | eTypeIsStructUnion | eTypeIsProtocol; break; case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: swift_flags |= eTypeIsMetatype | eTypeHasValue; break; case swift::TypeKind::Archetype: swift_flags |= eTypeHasValue | eTypeIsScalar | eTypeIsPointer | eTypeIsArchetype; break; case swift::TypeKind::LValue: if (pointee_or_element_clang_type) *pointee_or_element_clang_type = GetNonReferenceType(type); swift_flags |= eTypeHasChildren | eTypeIsReference | eTypeHasValue; break; case swift::TypeKind::InOut: case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case 
swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return swift_flags; } lldb::LanguageType SwiftASTContext::GetMinimumLanguage(void *type) { if (!type) return lldb::eLanguageTypeC; return lldb::eLanguageTypeSwift; } lldb::TypeClass SwiftASTContext::GetTypeClass(void *type) { VALID_OR_RETURN(lldb::eTypeClassInvalid); if (!type) return lldb::eTypeClassInvalid; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: return lldb::eTypeClassOther; case swift::TypeKind::BuiltinInteger: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinFloat: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinRawPointer: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinNativeObject: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinUnsafeValueBuffer: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinUnknownObject: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinBridgeObject: return lldb::eTypeClassBuiltin; case swift::TypeKind::BuiltinVector: return lldb::eTypeClassVector; case swift::TypeKind::Tuple: return lldb::eTypeClassArray; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetTypeClass(); case swift::TypeKind::GenericTypeParam: return lldb::eTypeClassOther; case swift::TypeKind::DependentMember: return lldb::eTypeClassOther; case swift::TypeKind::Enum: return lldb::eTypeClassUnion; case swift::TypeKind::Struct: return lldb::eTypeClassStruct; case swift::TypeKind::Class: return lldb::eTypeClassClass; case swift::TypeKind::Protocol: return lldb::eTypeClassOther; case swift::TypeKind::Metatype: return lldb::eTypeClassOther; case swift::TypeKind::Module: return lldb::eTypeClassOther; case swift::TypeKind::Archetype: return lldb::eTypeClassOther; case swift::TypeKind::Function: return lldb::eTypeClassFunction; case swift::TypeKind::GenericFunction: return lldb::eTypeClassFunction; case swift::TypeKind::ProtocolComposition: return lldb::eTypeClassOther; case swift::TypeKind::LValue: return lldb::eTypeClassReference; case swift::TypeKind::UnboundGeneric: return lldb::eTypeClassOther; case swift::TypeKind::BoundGenericClass: return lldb::eTypeClassClass; case swift::TypeKind::BoundGenericEnum: return lldb::eTypeClassUnion; case swift::TypeKind::BoundGenericStruct: return lldb::eTypeClassStruct; case swift::TypeKind::TypeVariable: return lldb::eTypeClassOther; case swift::TypeKind::ExistentialMetatype: return lldb::eTypeClassOther; case swift::TypeKind::DynamicSelf: return lldb::eTypeClassOther; case swift::TypeKind::SILBox: return lldb::eTypeClassOther; case swift::TypeKind::SILFunction: return lldb::eTypeClassFunction; case swift::TypeKind::SILBlockStorage: return lldb::eTypeClassOther; case swift::TypeKind::InOut: return lldb::eTypeClassOther; case swift::TypeKind::Unresolved: return lldb::eTypeClassOther; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return lldb::eTypeClassOther; } unsigned SwiftASTContext::GetTypeQualifiers(void *type) { return 0; } //---------------------------------------------------------------------- // Creating related types 
//----------------------------------------------------------------------
CompilerType SwiftASTContext::GetArrayElementType(void *type,
                                                  uint64_t *stride) {
  VALID_OR_RETURN(CompilerType());

  CompilerType element_type;
  if (type) {
    swift::CanType swift_type(GetCanonicalSwiftType(type));
    // There are a couple of structs that mean "Array" in Swift:
    //   Array<T>
    //   NativeArray<T>
    //   ArraySlice<T>
    // Treat them all as arrays for convenience's sake.
    swift::BoundGenericStructType *boundGenericStructType(
        swift_type->getAs<swift::BoundGenericStructType>());
    if (boundGenericStructType) {
      auto args = boundGenericStructType->getGenericArgs();
      swift::StructDecl *decl = boundGenericStructType->getDecl();
      if (args.size() == 1 && decl->getModuleContext()->isStdlibModule()) {
        const char *declname = decl->getName().get();
        if (0 == strcmp(declname, "NativeArray") ||
            0 == strcmp(declname, "Array") ||
            0 == strcmp(declname, "ArraySlice"))
          element_type = CompilerType(GetASTContext(), args[0].getPointer());
      }
    }
  }
  return element_type;
}

CompilerType SwiftASTContext::GetCanonicalType(void *type) {
  VALID_OR_RETURN(CompilerType());

  if (type)
    return CompilerType(GetASTContext(),
                        GetCanonicalSwiftType(type).getPointer());
  return CompilerType();
}

CompilerType SwiftASTContext::GetInstanceType(void *type) {
  VALID_OR_RETURN(CompilerType());

  if (!type)
    return CompilerType();

  swift::CanType swift_can_type(GetCanonicalSwiftType(type));
  switch (swift_can_type->getKind()) {
  case swift::TypeKind::ExistentialMetatype:
  case swift::TypeKind::Metatype: {
    auto metatype_type =
        swift::dyn_cast<swift::AnyMetatypeType>(swift_can_type);
    if (metatype_type)
      return CompilerType(GetASTContext(),
                          metatype_type.getInstanceType().getPointer());
    return CompilerType();
  }
  default:
    break;
  }

  return CompilerType(GetASTContext(), GetSwiftType(type));
}

CompilerType SwiftASTContext::GetFullyUnqualifiedType(void *type) {
  VALID_OR_RETURN(CompilerType());

  return CompilerType(GetASTContext(), GetSwiftType(type));
}

int SwiftASTContext::GetFunctionArgumentCount(void *type) {
  return GetNumberOfFunctionArguments(type);
}

CompilerType SwiftASTContext::GetFunctionArgumentTypeAtIndex(void *type,
                                                             size_t idx) {
  return GetFunctionArgumentAtIndex(type, idx);
}

CompilerType SwiftASTContext::GetFunctionReturnType(void *type) {
  VALID_OR_RETURN(CompilerType());

  if (type) {
    auto func =
        swift::dyn_cast<swift::AnyFunctionType>(GetCanonicalSwiftType(type));
    if (func)
      return CompilerType(GetASTContext(), func.getResult().getPointer());
  }
  return CompilerType();
}

size_t SwiftASTContext::GetNumMemberFunctions(void *type) {
  size_t num_functions = 0;
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto nominal_decl = swift_can_type.getAnyNominal();
    if (nominal_decl) {
      auto iter = nominal_decl->getMembers().begin();
      auto end = nominal_decl->getMembers().end();
      for (; iter != end; iter++) {
        switch (iter->getKind()) {
        case swift::DeclKind::Constructor:
        case swift::DeclKind::Destructor:
        case swift::DeclKind::Func:
          num_functions += 1;
          break;
        default:
          break;
        }
      }
    }
  }
  return num_functions;
}

TypeMemberFunctionImpl
SwiftASTContext::GetMemberFunctionAtIndex(void *type, size_t idx) {
  VALID_OR_RETURN(TypeMemberFunctionImpl());

  std::string name("");
  CompilerType result_type;
  MemberFunctionKind kind(MemberFunctionKind::eMemberFunctionKindUnknown);
  swift::AbstractFunctionDecl *the_decl_we_care_about = nullptr;
  if (type) {
    swift::CanType swift_can_type(GetCanonicalSwiftType(type));
    auto nominal_decl = swift_can_type.getAnyNominal();
    if (nominal_decl) {
      auto iter =
nominal_decl->getMembers().begin(); auto end = nominal_decl->getMembers().end(); for (; iter != end; iter++) { auto decl_kind = iter->getKind(); switch (decl_kind) { case swift::DeclKind::Constructor: case swift::DeclKind::Destructor: case swift::DeclKind::Func: { if (idx == 0) { swift::AbstractFunctionDecl *abstract_func_decl = llvm::dyn_cast_or_null<swift::AbstractFunctionDecl>(*iter); if (abstract_func_decl) { switch (decl_kind) { case swift::DeclKind::Constructor: name.clear(); kind = lldb::eMemberFunctionKindConstructor; the_decl_we_care_about = abstract_func_decl; break; case swift::DeclKind::Destructor: name.clear(); kind = lldb::eMemberFunctionKindDestructor; the_decl_we_care_about = abstract_func_decl; break; case swift::DeclKind::Func: default: // I know that this can only be one of three kinds // since I am here.. { swift::FuncDecl *func_decl = llvm::dyn_cast<swift::FuncDecl>(*iter); if (func_decl) { if (func_decl->getName().empty()) name.clear(); else name.assign(func_decl->getName().get()); if (func_decl->isStatic()) kind = lldb::eMemberFunctionKindStaticMethod; else kind = lldb::eMemberFunctionKindInstanceMethod; the_decl_we_care_about = func_decl; } } } result_type = CompilerType(GetASTContext(), abstract_func_decl->getInterfaceType().getPointer()); } } else --idx; } break; default: break; } } } } if (type && the_decl_we_care_about && (kind != eMemberFunctionKindUnknown)) return TypeMemberFunctionImpl( result_type, CompilerDecl(this, the_decl_we_care_about), name, kind); return TypeMemberFunctionImpl(); } CompilerType SwiftASTContext::GetLValueReferenceType(void *type) { VALID_OR_RETURN(CompilerType()); if (type) return CompilerType(GetASTContext(), swift::LValueType::get(GetSwiftType(type))); return CompilerType(); } CompilerType SwiftASTContext::GetRValueReferenceType(void *type) { return CompilerType(); } CompilerType SwiftASTContext::GetNonReferenceType(void *type) { VALID_OR_RETURN(CompilerType()); if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); swift::LValueType *lvalue = swift_can_type->getAs<swift::LValueType>(); if (lvalue) return CompilerType(GetASTContext(), lvalue->getObjectType().getPointer()); swift::InOutType *inout = swift_can_type->getAs<swift::InOutType>(); if (inout) return CompilerType(GetASTContext(), inout->getObjectType().getPointer()); } return CompilerType(); } CompilerType SwiftASTContext::GetPointeeType(void *type) { return CompilerType(); } CompilerType SwiftASTContext::GetPointerType(void *type) { VALID_OR_RETURN(CompilerType()); if (type) { swift::Type swift_type(::GetSwiftType(type)); const swift::TypeKind type_kind = swift_type->getKind(); if (type_kind == swift::TypeKind::BuiltinRawPointer) return CompilerType(GetASTContext(), swift_type); } return CompilerType(); } CompilerType SwiftASTContext::GetTypedefedType(void *type) { VALID_OR_RETURN(CompilerType()); if (type) { swift::Type swift_type(::GetSwiftType(type)); swift::NameAliasType *name_alias_type = swift::dyn_cast<swift::NameAliasType>(swift_type.getPointer()); if (name_alias_type) { return CompilerType(GetASTContext(), name_alias_type->getSinglyDesugaredType()); } } return CompilerType(); } CompilerType SwiftASTContext::GetUnboundType(lldb::opaque_compiler_type_t type) { VALID_OR_RETURN(CompilerType()); if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); swift::BoundGenericType *bound_generic_type = swift_can_type->getAs<swift::BoundGenericType>(); if (bound_generic_type) { swift::NominalTypeDecl *nominal_type_decl = 
bound_generic_type->getDecl(); if (nominal_type_decl) return CompilerType(GetASTContext(), nominal_type_decl->getDeclaredType()); } } return CompilerType(GetASTContext(), GetSwiftType(type)); } //---------------------------------------------------------------------- // Create related types using the current type's AST //---------------------------------------------------------------------- CompilerType SwiftASTContext::GetBasicTypeFromAST(lldb::BasicType basic_type) { return CompilerType(); } CompilerType SwiftASTContext::GetIntTypeFromBitSize(size_t bit_size, bool is_signed) { return CompilerType(); } CompilerType SwiftASTContext::GetFloatTypeFromBitSize(size_t bit_size) { return CompilerType(); } //---------------------------------------------------------------------- // Exploring the type //---------------------------------------------------------------------- const swift::irgen::TypeInfo * SwiftASTContext::GetSwiftTypeInfo(swift::Type container_type, swift::VarDecl *item_decl) { VALID_OR_RETURN(nullptr); if (container_type && item_decl) { auto &irgen_module = GetIRGenModule(); swift::CanType container_can_type( GetCanonicalSwiftType(container_type.getPointer())); swift::SILType lowered_container_type = irgen_module.getLoweredType(container_can_type); swift::SILType lowered_field_type = lowered_container_type.getFieldType(item_decl, *GetSILModule()); return &irgen_module.getTypeInfo(lowered_field_type); } return nullptr; } const swift::irgen::TypeInfo *SwiftASTContext::GetSwiftTypeInfo(void *type) { VALID_OR_RETURN(nullptr); if (type) { auto &irgen_module = GetIRGenModule(); swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (swift_can_type->hasTypeParameter()) return nullptr; swift::SILType swift_sil_type = irgen_module.getLoweredType( swift_can_type); return &irgen_module.getTypeInfo(swift_sil_type); } return nullptr; } const swift::irgen::FixedTypeInfo * SwiftASTContext::GetSwiftFixedTypeInfo(void *type) { VALID_OR_RETURN(nullptr); const swift::irgen::TypeInfo *type_info = GetSwiftTypeInfo(type); if (type_info) { if (type_info->isFixedSize()) return swift::cast<const swift::irgen::FixedTypeInfo>(type_info); } return nullptr; } uint64_t SwiftASTContext::GetBitSize(lldb::opaque_compiler_type_t type, ExecutionContextScope *exe_scope) { if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Archetype: case swift::TypeKind::LValue: case swift::TypeKind::UnboundGeneric: case swift::TypeKind::GenericFunction: case swift::TypeKind::Function: return GetPointerByteSize() * 8; default: break; } const swift::irgen::FixedTypeInfo *fixed_type_info = GetSwiftFixedTypeInfo(type); if (fixed_type_info) return fixed_type_info->getFixedSize().getValue() * 8; } return 0; } uint64_t SwiftASTContext::GetByteStride(lldb::opaque_compiler_type_t type) { if (type) { const swift::irgen::FixedTypeInfo *fixed_type_info = GetSwiftFixedTypeInfo(type); if (fixed_type_info) return fixed_type_info->getFixedStride().getValue(); } return 0; } size_t SwiftASTContext::GetTypeBitAlign(void *type) { if (type) { const swift::irgen::FixedTypeInfo *fixed_type_info = GetSwiftFixedTypeInfo(type); if (fixed_type_info) return fixed_type_info->getFixedAlignment().getValue(); } return 0; } lldb::Encoding SwiftASTContext::GetEncoding(void *type, uint64_t &count) { VALID_OR_RETURN(lldb::eEncodingInvalid); if (!type) return lldb::eEncodingInvalid; count = 1; swift::CanType 
swift_can_type(GetCanonicalSwiftType(type));
  const swift::TypeKind type_kind = swift_can_type->getKind();
  switch (type_kind) {
  case swift::TypeKind::Error:
    break;
  case swift::TypeKind::BuiltinInteger:
    return lldb::eEncodingSint; // TODO: detect if an integer is unsigned
  case swift::TypeKind::BuiltinFloat:
    return lldb::eEncodingIEEE754;
  case swift::TypeKind::Archetype:
  case swift::TypeKind::BuiltinRawPointer:
  case swift::TypeKind::BuiltinNativeObject:
  case swift::TypeKind::BuiltinUnsafeValueBuffer:
  case swift::TypeKind::BuiltinUnknownObject:
  case swift::TypeKind::BuiltinBridgeObject:
  case swift::TypeKind::Class: // Classes are pointers in swift...
  case swift::TypeKind::BoundGenericClass:
    return lldb::eEncodingUint;

  case swift::TypeKind::BuiltinVector:
    break;
  case swift::TypeKind::Tuple:
    break;
  case swift::TypeKind::UnmanagedStorage:
  case swift::TypeKind::UnownedStorage:
  case swift::TypeKind::WeakStorage:
    return CompilerType(GetASTContext(),
                        swift_can_type->getReferenceStorageReferent())
        .GetEncoding(count);
  case swift::TypeKind::GenericTypeParam:
  case swift::TypeKind::DependentMember:
    break;

  case swift::TypeKind::ExistentialMetatype:
  case swift::TypeKind::Metatype:
    return lldb::eEncodingUint;

  case swift::TypeKind::GenericFunction:
  case swift::TypeKind::Function:
    return lldb::eEncodingUint;

  case swift::TypeKind::Enum:
  case swift::TypeKind::BoundGenericEnum:
    break;

  case swift::TypeKind::Struct:
  case swift::TypeKind::Protocol:
  case swift::TypeKind::Module:
  case swift::TypeKind::ProtocolComposition:
    break;
  case swift::TypeKind::LValue:
    return lldb::eEncodingUint;
  case swift::TypeKind::UnboundGeneric:
  case swift::TypeKind::BoundGenericStruct:
  case swift::TypeKind::TypeVariable:
  case swift::TypeKind::DynamicSelf:
  case swift::TypeKind::SILBox:
  case swift::TypeKind::SILFunction:
  case swift::TypeKind::SILBlockStorage:
  case swift::TypeKind::InOut:
  case swift::TypeKind::Unresolved:
    break;

  case swift::TypeKind::Optional:
  case swift::TypeKind::NameAlias:
  case swift::TypeKind::Paren:
  case swift::TypeKind::Dictionary:
  case swift::TypeKind::ArraySlice:
    assert(false && "Not a canonical type");
    break;
  }
  count = 0;
  return lldb::eEncodingInvalid;
}

lldb::Format SwiftASTContext::GetFormat(void *type) {
  VALID_OR_RETURN(lldb::eFormatInvalid);

  if (!type)
    return lldb::eFormatDefault;

  swift::CanType swift_can_type(GetCanonicalSwiftType(type));
  const swift::TypeKind type_kind = swift_can_type->getKind();
  switch (type_kind) {
  case swift::TypeKind::Error:
    break;
  case swift::TypeKind::BuiltinInteger:
    return eFormatDecimal; // TODO: detect if an integer is unsigned
  case swift::TypeKind::BuiltinFloat:
    return eFormatFloat;
  case swift::TypeKind::BuiltinRawPointer:
  case swift::TypeKind::BuiltinNativeObject:
  case swift::TypeKind::BuiltinUnknownObject:
  case swift::TypeKind::BuiltinUnsafeValueBuffer:
  case swift::TypeKind::BuiltinBridgeObject:
  case swift::TypeKind::Archetype:
    return eFormatAddressInfo;

  // Classes are always pointers in swift...
  case swift::TypeKind::Class:
  case swift::TypeKind::BoundGenericClass:
    return eFormatHex;

  case swift::TypeKind::BuiltinVector:
    break;
  case swift::TypeKind::Tuple:
    break;
  case swift::TypeKind::UnmanagedStorage:
  case swift::TypeKind::UnownedStorage:
  case swift::TypeKind::WeakStorage:
    return CompilerType(GetASTContext(),
                        swift_can_type->getReferenceStorageReferent())
        .GetFormat();
  case swift::TypeKind::GenericTypeParam:
  case swift::TypeKind::DependentMember:
    break;

  case swift::TypeKind::Enum:
  case swift::TypeKind::BoundGenericEnum:
    return eFormatUnsigned;

  case swift::TypeKind::GenericFunction:
  case swift::TypeKind::Function:
    return lldb::eFormatAddressInfo;

  case swift::TypeKind::Struct:
  case swift::TypeKind::Protocol:
  case swift::TypeKind::Metatype:
  case swift::TypeKind::Module:
  case swift::TypeKind::ProtocolComposition:
    break;
  case swift::TypeKind::LValue:
    return lldb::eFormatHex;
  case swift::TypeKind::UnboundGeneric:
  case swift::TypeKind::BoundGenericStruct:
  case swift::TypeKind::TypeVariable:
  case swift::TypeKind::ExistentialMetatype:
  case swift::TypeKind::DynamicSelf:
  case swift::TypeKind::SILBox:
  case swift::TypeKind::SILFunction:
  case swift::TypeKind::SILBlockStorage:
  case swift::TypeKind::InOut:
  case swift::TypeKind::Unresolved:
    break;

  case swift::TypeKind::Optional:
  case swift::TypeKind::NameAlias:
  case swift::TypeKind::Paren:
  case swift::TypeKind::Dictionary:
  case swift::TypeKind::ArraySlice:
    assert(false && "Not a canonical type");
    break;
  }
  // We don't know how to display this type...
  return lldb::eFormatBytes;
}

uint32_t SwiftASTContext::GetNumChildren(void *type,
                                         bool omit_empty_base_classes) {
  VALID_OR_RETURN(0);

  if (!type)
    return 0;

  uint32_t num_children = 0;

  swift::CanType swift_can_type(GetCanonicalSwiftType(type));
  const swift::TypeKind type_kind = swift_can_type->getKind();
  switch (type_kind) {
  case swift::TypeKind::Error:
  case swift::TypeKind::BuiltinInteger:
  case swift::TypeKind::BuiltinFloat:
  case swift::TypeKind::BuiltinRawPointer:
  case swift::TypeKind::BuiltinNativeObject:
  case swift::TypeKind::BuiltinUnknownObject:
  case swift::TypeKind::BuiltinUnsafeValueBuffer:
  case swift::TypeKind::BuiltinBridgeObject:
  case swift::TypeKind::BuiltinVector:
  case swift::TypeKind::Module:
  case swift::TypeKind::Function:
  case swift::TypeKind::GenericFunction:
  case swift::TypeKind::DynamicSelf:
  case swift::TypeKind::SILBox:
  case swift::TypeKind::SILFunction:
  case swift::TypeKind::InOut:
    break;
  case swift::TypeKind::UnmanagedStorage:
  case swift::TypeKind::UnownedStorage:
  case swift::TypeKind::WeakStorage:
    return CompilerType(GetASTContext(),
                        swift_can_type->getReferenceStorageReferent())
        .GetNumChildren(omit_empty_base_classes);
  case swift::TypeKind::GenericTypeParam:
  case swift::TypeKind::DependentMember:
    break;

  case swift::TypeKind::Enum:
  case swift::TypeKind::BoundGenericEnum: {
    SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
    if (cached_enum_info)
      return cached_enum_info->GetNumElementsWithPayload();
  } break;

  case swift::TypeKind::Tuple:
  case swift::TypeKind::Struct:
  case swift::TypeKind::BoundGenericStruct:
    return GetNumFields(type);

  case swift::TypeKind::Class:
  case swift::TypeKind::BoundGenericClass: {
    auto class_decl = swift_can_type->getClassOrBoundGenericClass();
    return (class_decl->hasSuperclass() ?
1 : 0) + GetNumFields(type); } case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: { ProtocolInfo protocol_info; if (!GetProtocolTypeInfo( CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info)) break; return protocol_info.m_num_storage_words; } case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: case swift::TypeKind::Archetype: return 0; case swift::TypeKind::LValue: { swift::LValueType *lvalue_type = swift_can_type->castTo<swift::LValueType>(); swift::TypeBase *deref_type = lvalue_type->getObjectType().getPointer(); uint32_t num_pointee_children = CompilerType(GetASTContext(), deref_type) .GetNumChildren(omit_empty_base_classes); // If this type points to a simple type (or to a class), then it has 1 child if (num_pointee_children == 0 || deref_type->getClassOrBoundGenericClass()) num_children = 1; else num_children = num_pointee_children; } break; case swift::TypeKind::UnboundGeneric: break; case swift::TypeKind::TypeVariable: break; case swift::TypeKind::SILBlockStorage: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return num_children; } lldb::BasicType SwiftASTContext::GetBasicTypeEnumeration(void *type) { return eBasicTypeInvalid; } #pragma mark Aggregate Types uint32_t SwiftASTContext::GetNumDirectBaseClasses(void *opaque_type) { if (!opaque_type) return 0; swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type)); swift::ClassDecl *class_decl = swift_can_type->getClassOrBoundGenericClass(); if (class_decl) { if (class_decl->hasSuperclass()) return 1; } return 0; } uint32_t SwiftASTContext::GetNumVirtualBaseClasses(void *opaque_type) { return 0; } uint32_t SwiftASTContext::GetNumFields(void *type) { VALID_OR_RETURN(0); if (!type) return 0; uint32_t count = 0; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: case swift::TypeKind::BuiltinInteger: case swift::TypeKind::BuiltinFloat: case swift::TypeKind::BuiltinRawPointer: case swift::TypeKind::BuiltinNativeObject: case swift::TypeKind::BuiltinUnknownObject: case swift::TypeKind::BuiltinUnsafeValueBuffer: case swift::TypeKind::BuiltinBridgeObject: case swift::TypeKind::BuiltinVector: break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetNumFields(); case swift::TypeKind::GenericTypeParam: case swift::TypeKind::DependentMember: break; case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info) return cached_enum_info->GetNumElementsWithPayload(); } break; case swift::TypeKind::Tuple: return cast<swift::TupleType>(swift_can_type)->getNumElements(); case swift::TypeKind::Struct: case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: case swift::TypeKind::BoundGenericStruct: { auto nominal = swift_can_type->getAnyNominal(); return GetStoredProperties(nominal).size(); } case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: return GetNumChildren(type, /*omit_empty_base_classes=*/false); case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: return 0; 
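  // None of the remaining kinds have member fields; fall through and return
  // the default count of 0.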
case swift::TypeKind::Module: case swift::TypeKind::Archetype: case swift::TypeKind::Function: case swift::TypeKind::GenericFunction: case swift::TypeKind::LValue: case swift::TypeKind::UnboundGeneric: case swift::TypeKind::TypeVariable: case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::InOut: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return count; } CompilerType SwiftASTContext::GetDirectBaseClassAtIndex(void *opaque_type, size_t idx, uint32_t *bit_offset_ptr) { VALID_OR_RETURN(CompilerType()); if (opaque_type) { swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type)); swift::ClassDecl *class_decl = swift_can_type->getClassOrBoundGenericClass(); if (class_decl) { swift::Type base_class_type = class_decl->getSuperclass(); if (base_class_type) return CompilerType(GetASTContext(), base_class_type.getPointer()); } } return CompilerType(); } CompilerType SwiftASTContext::GetVirtualBaseClassAtIndex(void *opaque_type, size_t idx, uint32_t *bit_offset_ptr) { return CompilerType(); } /// Retrieve the printable name of a tuple element. static std::string GetTupleElementName(const swift::TupleType *tuple_type, unsigned index, llvm::StringRef printed_index = "") { const auto &element = tuple_type->getElement(index); // Use the element name if there is one. if (!element.getName().empty()) return element.getName().str(); // If we know the printed index already, use that. if (!printed_index.empty()) return printed_index; // Print the index and return that. std::string str; llvm::raw_string_ostream(str) << index; return str; } /// Retrieve the printable name of a type referenced as a superclass. static std::string GetSuperclassName(const CompilerType &superclass_type) { return superclass_type.GetUnboundType().GetTypeName() .AsCString("<no type name>"); } /// Retrieve the type and name of a child of an existential type. static std::pair<CompilerType, std::string> GetExistentialTypeChild(swift::ASTContext *swift_ast_ctx, CompilerType type, const SwiftASTContext::ProtocolInfo &protocol_info, unsigned idx) { assert(idx < protocol_info.m_num_storage_words && "caller is responsible for validating index"); // A payload word for a non-class, non-error existential. if (idx < protocol_info.m_num_payload_words) { std::string name; llvm::raw_string_ostream(name) << "payload_data_" << idx; auto raw_pointer = swift_ast_ctx->TheRawPointerType; return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()), std::move(name) }; } // The instance for a class-bound existential. if (idx == 0 && protocol_info.m_is_class_only) { CompilerType class_type; if (protocol_info.m_superclass) { class_type = protocol_info.m_superclass; } else { auto raw_pointer = swift_ast_ctx->TheRawPointerType; class_type = CompilerType(swift_ast_ctx, raw_pointer.getPointer()); } return { class_type, "instance" }; } // The instance for an error existential. if (idx == 0 && protocol_info.m_is_errortype) { auto raw_pointer = swift_ast_ctx->TheRawPointerType; return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()), "error_instance" }; } // The metatype for a non-class, non-error existential. if (idx && idx == protocol_info.m_num_payload_words) { // The metatype for a non-class, non-error existential. 
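    // Opaque existential layout: the payload words come first, then this
    // metadata pointer, then one word per non-ObjC protocol witness table.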
auto any_metatype = swift::ExistentialMetatypeType::get(swift_ast_ctx->TheAnyType); return { CompilerType(swift_ast_ctx, any_metatype), "instance_type" }; } // A witness table. Figure out which protocol it corresponds to. unsigned witness_table_idx = idx - protocol_info.m_num_payload_words - 1; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); swift::ExistentialLayout layout = swift_can_type.getExistentialLayout(); std::string name; for (auto protoType : layout.getProtocols()) { auto proto = protoType->getDecl(); if (proto->isObjC()) continue; if (witness_table_idx == 0) { llvm::raw_string_ostream(name) << "witness_table_" << proto->getBaseName().userFacingName(); break; } --witness_table_idx; } auto raw_pointer = swift_ast_ctx->TheRawPointerType; return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()), std::move(name) }; } CompilerType SwiftASTContext::GetFieldAtIndex(void *type, size_t idx, std::string &name, uint64_t *bit_offset_ptr, uint32_t *bitfield_bit_size_ptr, bool *is_bitfield_ptr) { VALID_OR_RETURN(CompilerType()); if (!type) return CompilerType(); swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: case swift::TypeKind::BuiltinInteger: case swift::TypeKind::BuiltinFloat: case swift::TypeKind::BuiltinRawPointer: case swift::TypeKind::BuiltinNativeObject: case swift::TypeKind::BuiltinUnsafeValueBuffer: case swift::TypeKind::BuiltinUnknownObject: case swift::TypeKind::BuiltinBridgeObject: case swift::TypeKind::BuiltinVector: break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetFieldAtIndex(idx, name, bit_offset_ptr, bitfield_bit_size_ptr, is_bitfield_ptr); case swift::TypeKind::GenericTypeParam: case swift::TypeKind::DependentMember: break; case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info && idx < cached_enum_info->GetNumElementsWithPayload()) { const SwiftEnumDescriptor::ElementInfo *enum_element_info = cached_enum_info->GetElementWithPayloadAtIndex(idx); name.assign(enum_element_info->name.GetCString()); if (bit_offset_ptr) *bit_offset_ptr = 0; if (bitfield_bit_size_ptr) *bitfield_bit_size_ptr = 0; if (is_bitfield_ptr) *is_bitfield_ptr = false; return enum_element_info->payload_type; } } break; case swift::TypeKind::Tuple: { auto tuple_type = cast<swift::TupleType>(swift_can_type); if (idx >= tuple_type->getNumElements()) break; // We cannot reliably get layout information without an execution // context. if (bit_offset_ptr) *bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET; if (bitfield_bit_size_ptr) *bitfield_bit_size_ptr = 0; if (is_bitfield_ptr) *is_bitfield_ptr = false; name = GetTupleElementName(tuple_type, idx); const auto &child = tuple_type->getElement(idx); return CompilerType(GetASTContext(), child.getType().getPointer()); } case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: { auto class_decl = swift_can_type->getClassOrBoundGenericClass(); if (class_decl->hasSuperclass()) { if (idx == 0) { swift::Type superclass_swift_type = swift_can_type->getSuperclass(); CompilerType superclass_type(GetASTContext(), superclass_swift_type.getPointer()); name = GetSuperclassName(superclass_type); // We cannot reliably get layout information without an execution // context. 
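        // We hand back LLDB_INVALID_IVAR_OFFSET here so callers know to fall
        // back to the language runtime for a concrete offset (cf.
        // GetInstanceVariableOffset_Metadata below).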
if (bit_offset_ptr) *bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET; if (bitfield_bit_size_ptr) *bitfield_bit_size_ptr = 0; if (is_bitfield_ptr) *is_bitfield_ptr = false; return superclass_type; } // Adjust the index to refer into the stored properties. --idx; } LLVM_FALLTHROUGH; } case swift::TypeKind::Struct: case swift::TypeKind::BoundGenericStruct: { auto nominal = swift_can_type->getAnyNominal(); auto stored_properties = GetStoredProperties(nominal); if (idx >= stored_properties.size()) break; auto property = stored_properties[idx]; name = property->getBaseName().userFacingName(); // We cannot reliably get layout information without an execution // context. if (bit_offset_ptr) *bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET; if (bitfield_bit_size_ptr) *bitfield_bit_size_ptr = 0; if (is_bitfield_ptr) *is_bitfield_ptr = false; swift::Type child_swift_type = swift_can_type->getTypeOfMember( nominal->getModuleContext(), property, nullptr); return CompilerType(GetASTContext(), child_swift_type.getPointer()); } case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: { ProtocolInfo protocol_info; if (!GetProtocolTypeInfo( CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info)) break; if (idx >= protocol_info.m_num_storage_words) break; CompilerType compiler_type(GetASTContext(), GetSwiftType(type)); CompilerType child_type; std::tie(child_type, name) = GetExistentialTypeChild(GetASTContext(), compiler_type, protocol_info, idx); uint64_t child_size = child_type.GetByteSize(nullptr); if (bit_offset_ptr) *bit_offset_ptr = idx * child_size * 8; if (bitfield_bit_size_ptr) *bitfield_bit_size_ptr = 0; if (is_bitfield_ptr) *is_bitfield_ptr = false; return child_type; } case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: break; case swift::TypeKind::Module: case swift::TypeKind::Archetype: case swift::TypeKind::Function: case swift::TypeKind::GenericFunction: case swift::TypeKind::LValue: case swift::TypeKind::UnboundGeneric: case swift::TypeKind::TypeVariable: case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::InOut: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return CompilerType(); } // If a pointer to a pointee type (the clang_type arg) says that it has no // children, then we either need to trust it, or override it and return a // different result. For example, an "int *" has one child that is an integer, // but a function pointer doesn't have any children. Likewise if a Record type // claims it has no children, then there really is nothing to show. 
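// A sketch of the behavior described above, using hypothetical type handles
// (these variable names are illustrative, not part of this file):
//   GetNumPointeeChildren(builtin_int_type) == 1  // "int *": one integer child
//   GetNumPointeeChildren(function_type)    == 0  // function pointers: no children
//   GetNumPointeeChildren(lvalue_type)      == 1  // lvalue references act like pointers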
uint32_t SwiftASTContext::GetNumPointeeChildren(void *type) { if (!type) return 0; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: return 0; case swift::TypeKind::BuiltinInteger: return 1; case swift::TypeKind::BuiltinFloat: return 1; case swift::TypeKind::BuiltinRawPointer: return 1; case swift::TypeKind::BuiltinUnsafeValueBuffer: return 1; case swift::TypeKind::BuiltinNativeObject: return 1; case swift::TypeKind::BuiltinUnknownObject: return 1; case swift::TypeKind::BuiltinBridgeObject: return 1; case swift::TypeKind::BuiltinVector: return 0; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return GetNumPointeeChildren( swift::cast<swift::ReferenceStorageType>(swift_can_type) .getPointer()); case swift::TypeKind::Tuple: return 0; case swift::TypeKind::GenericTypeParam: return 0; case swift::TypeKind::DependentMember: return 0; case swift::TypeKind::Enum: return 0; case swift::TypeKind::Struct: return 0; case swift::TypeKind::Class: return 0; case swift::TypeKind::Protocol: return 0; case swift::TypeKind::Metatype: return 0; case swift::TypeKind::Module: return 0; case swift::TypeKind::Archetype: return 0; case swift::TypeKind::Function: return 0; case swift::TypeKind::GenericFunction: return 0; case swift::TypeKind::ProtocolComposition: return 0; case swift::TypeKind::LValue: return 1; case swift::TypeKind::UnboundGeneric: return 0; case swift::TypeKind::BoundGenericClass: return 0; case swift::TypeKind::BoundGenericEnum: return 0; case swift::TypeKind::BoundGenericStruct: return 0; case swift::TypeKind::TypeVariable: return 0; case swift::TypeKind::ExistentialMetatype: return 0; case swift::TypeKind::DynamicSelf: return 0; case swift::TypeKind::SILBox: return 0; case swift::TypeKind::SILFunction: return 0; case swift::TypeKind::SILBlockStorage: return 0; case swift::TypeKind::InOut: return 0; case swift::TypeKind::Unresolved: return 0; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return 0; } static int64_t GetInstanceVariableOffset_Metadata( ValueObject *valobj, ExecutionContext *exe_ctx, const CompilerType &type, ConstString ivar_name, const CompilerType &ivar_type) { Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf( "[GetInstanceVariableOffset_Metadata] ivar_name = %s, type = %s", ivar_name.AsCString(), type.GetTypeName().AsCString()); Process *process = exe_ctx->GetProcessPtr(); if (process) { SwiftLanguageRuntime *runtime = process->GetSwiftLanguageRuntime(); if (runtime) { Status error; if (auto offset = runtime->GetMemberVariableOffset(type, valobj, ivar_name, &error)) { if (log) log->Printf("[GetInstanceVariableOffset_Metadata] for %s: %llu", ivar_name.AsCString(), *offset); return *offset; } else if (log) { log->Printf("[GetInstanceVariableOffset_Metadata] resolver failure: %s", error.AsCString()); } } else if (log) log->Printf("[GetInstanceVariableOffset_Metadata] no runtime"); } else if (log) log->Printf("[GetInstanceVariableOffset_Metadata] no process"); return LLDB_INVALID_IVAR_OFFSET; } static int64_t GetInstanceVariableOffset(ValueObject *valobj, ExecutionContext *exe_ctx, const CompilerType &class_type, const char *ivar_name, const CompilerType &ivar_type) { int64_t offset = 
LLDB_INVALID_IVAR_OFFSET; if (ivar_name && ivar_name[0]) { if (exe_ctx) { Target *target = exe_ctx->GetTargetPtr(); if (target) { offset = GetInstanceVariableOffset_Metadata( valobj, exe_ctx, class_type, ConstString(ivar_name), ivar_type); } } } return offset; } bool SwiftASTContext::IsNonTriviallyManagedReferenceType( const CompilerType &type, NonTriviallyManagedReferenceStrategy &strategy, CompilerType *underlying_type) { if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { default: break; case swift::TypeKind::UnmanagedStorage: { strategy = NonTriviallyManagedReferenceStrategy::eUnmanaged; if (underlying_type) *underlying_type = CompilerType( ast, swift_can_type->getReferenceStorageReferent() .getPointer()); } return true; case swift::TypeKind::UnownedStorage: { strategy = NonTriviallyManagedReferenceStrategy::eUnowned; if (underlying_type) *underlying_type = CompilerType( ast, swift_can_type->getReferenceStorageReferent() .getPointer()); } return true; case swift::TypeKind::WeakStorage: { strategy = NonTriviallyManagedReferenceStrategy::eWeak; if (underlying_type) *underlying_type = CompilerType( ast, swift_can_type->getReferenceStorageReferent() .getPointer()); } return true; } } return false; } CompilerType SwiftASTContext::GetChildCompilerTypeAtIndex( void *type, ExecutionContext *exe_ctx, size_t idx, bool transparent_pointers, bool omit_empty_base_classes, bool ignore_array_bounds, std::string &child_name, uint32_t &child_byte_size, int32_t &child_byte_offset, uint32_t &child_bitfield_bit_size, uint32_t &child_bitfield_bit_offset, bool &child_is_base_class, bool &child_is_deref_of_parent, ValueObject *valobj, uint64_t &language_flags) { VALID_OR_RETURN(CompilerType()); if (!type) return CompilerType(); language_flags = 0; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: case swift::TypeKind::BuiltinInteger: case swift::TypeKind::BuiltinFloat: case swift::TypeKind::BuiltinRawPointer: case swift::TypeKind::BuiltinNativeObject: case swift::TypeKind::BuiltinUnknownObject: case swift::TypeKind::BuiltinUnsafeValueBuffer: case swift::TypeKind::BuiltinBridgeObject: case swift::TypeKind::BuiltinVector: break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetChildCompilerTypeAtIndex( exe_ctx, idx, transparent_pointers, omit_empty_base_classes, ignore_array_bounds, child_name, child_byte_size, child_byte_offset, child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class, child_is_deref_of_parent, valobj, language_flags); case swift::TypeKind::GenericTypeParam: case swift::TypeKind::DependentMember: break; case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info && idx < cached_enum_info->GetNumElementsWithPayload()) { const SwiftEnumDescriptor::ElementInfo *element_info = cached_enum_info->GetElementWithPayloadAtIndex(idx); child_name.assign(element_info->name.GetCString()); child_byte_size = element_info->payload_type.GetByteSize( exe_ctx ? 
exe_ctx->GetBestExecutionContextScope() : NULL); child_byte_offset = 0; child_bitfield_bit_size = 0; child_bitfield_bit_offset = 0; child_is_base_class = false; child_is_deref_of_parent = false; if (element_info->is_indirect) { language_flags |= LanguageFlags::eIsIndirectEnumCase; return CompilerType(GetASTContext(), GetASTContext()->TheRawPointerType.getPointer()); } else return element_info->payload_type; } } break; case swift::TypeKind::Tuple: { auto tuple_type = cast<swift::TupleType>(swift_can_type); if (idx >= tuple_type->getNumElements()) break; const auto &child = tuple_type->getElement(idx); // Format the integer. llvm::SmallString<16> printed_idx; llvm::raw_svector_ostream(printed_idx) << idx; CompilerType child_type(GetASTContext(), child.getType().getPointer()); auto exe_ctx_scope = exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL; child_name = GetTupleElementName(tuple_type, idx, printed_idx); child_byte_size = child_type.GetByteSize(exe_ctx_scope); child_is_base_class = false; child_is_deref_of_parent = false; CompilerType compiler_type(GetASTContext(), GetSwiftType(type)); child_byte_offset = GetInstanceVariableOffset(valobj, exe_ctx, compiler_type, printed_idx.c_str(), child_type); child_bitfield_bit_size = 0; child_bitfield_bit_offset = 0; return child_type; } case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: { auto class_decl = swift_can_type->getClassOrBoundGenericClass(); // Child 0 is the superclass, if there is one. if (class_decl->hasSuperclass()) { if (idx == 0) { swift::Type superclass_swift_type = swift_can_type->getSuperclass(); CompilerType superclass_type(GetASTContext(), superclass_swift_type.getPointer()); child_name = GetSuperclassName(superclass_type); auto exe_ctx_scope = exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL; child_byte_size = superclass_type.GetByteSize(exe_ctx_scope); child_is_base_class = true; child_is_deref_of_parent = false; child_byte_offset = 0; child_bitfield_bit_size = 0; child_bitfield_bit_offset = 0; language_flags |= LanguageFlags::eIgnoreInstancePointerness; return superclass_type; } // Adjust the index to refer into the stored properties. --idx; } LLVM_FALLTHROUGH; } case swift::TypeKind::Struct: case swift::TypeKind::BoundGenericStruct: { auto nominal = swift_can_type->getAnyNominal(); auto stored_properties = GetStoredProperties(nominal); if (idx >= stored_properties.size()) break; // Find the stored property with this index. auto property = stored_properties[idx]; swift::Type child_swift_type = swift_can_type->getTypeOfMember( nominal->getModuleContext(), property, nullptr); CompilerType child_type(GetASTContext(), child_swift_type.getPointer()); auto exe_ctx_scope = exe_ctx ? 
exe_ctx->GetBestExecutionContextScope() : NULL; child_name = property->getBaseName().userFacingName(); child_byte_size = child_type.GetByteSize(exe_ctx_scope); child_is_base_class = false; child_is_deref_of_parent = false; CompilerType compiler_type(GetASTContext(), GetSwiftType(type)); child_byte_offset = GetInstanceVariableOffset(valobj, exe_ctx, compiler_type, child_name.c_str(), child_type); child_bitfield_bit_size = 0; child_bitfield_bit_offset = 0; return child_type; } case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: { ProtocolInfo protocol_info; if (!GetProtocolTypeInfo( CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info)) break; if (idx >= protocol_info.m_num_storage_words) break; CompilerType compiler_type(GetASTContext(), GetSwiftType(type)); CompilerType child_type; std::tie(child_type, child_name) = GetExistentialTypeChild(GetASTContext(), compiler_type, protocol_info, idx); auto exe_ctx_scope = exe_ctx ? exe_ctx->GetBestExecutionContextScope() : nullptr; child_byte_size = child_type.GetByteSize(exe_ctx_scope); child_byte_offset = idx * child_byte_size; child_bitfield_bit_size = 0; child_bitfield_bit_offset = 0; child_is_base_class = false; child_is_deref_of_parent = false; return child_type; } case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: break; case swift::TypeKind::Module: case swift::TypeKind::Archetype: case swift::TypeKind::Function: case swift::TypeKind::GenericFunction: break; case swift::TypeKind::LValue: if (idx < GetNumChildren(type, omit_empty_base_classes)) { CompilerType pointee_clang_type(GetNonReferenceType(type)); Flags pointee_clang_type_flags(pointee_clang_type.GetTypeInfo()); const char *parent_name = valobj ? valobj->GetName().GetCString() : NULL; if (parent_name) { child_name.assign(1, '&'); child_name += parent_name; } // We have a pointer to a simple type if (idx == 0) { child_byte_size = pointee_clang_type.GetByteSize( exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL); child_byte_offset = 0; return pointee_clang_type; } } break; case swift::TypeKind::UnboundGeneric: break; case swift::TypeKind::TypeVariable: break; case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::InOut: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return CompilerType(); } // Look for a child member (doesn't include base classes, but it does include // their members) in the type hierarchy. Returns an index path into "clang_type" // on how to reach the appropriate member. // // class A // { // public: // int m_a; // int m_b; // }; // // class B // { // }; // // class C : // public B, // public A // { // }; // // If we have a clang type that describes "class C", and we wanted to look for // "m_b" in it: // // With omit_empty_base_classes == false we would get an integer array back // with: // { 1, 1 } // The first index 1 is the child index for "class A" within class C. // The second index 1 is the child index for "m_b" within class A. // // With omit_empty_base_classes == true we would get an integer array back with: // { 0, 1 } // The first index 0 is the child index for "class A" within class C (since // class B doesn't have any members it doesn't count). 
// The second index 1 is the child index for "m_b" within class A. size_t SwiftASTContext::GetIndexOfChildMemberWithName( void *type, const char *name, bool omit_empty_base_classes, std::vector<uint32_t> &child_indexes) { VALID_OR_RETURN(0); if (type && name && name[0]) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: case swift::TypeKind::BuiltinInteger: case swift::TypeKind::BuiltinFloat: case swift::TypeKind::BuiltinRawPointer: case swift::TypeKind::BuiltinNativeObject: case swift::TypeKind::BuiltinUnknownObject: case swift::TypeKind::BuiltinUnsafeValueBuffer: case swift::TypeKind::BuiltinBridgeObject: case swift::TypeKind::BuiltinVector: break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .GetIndexOfChildMemberWithName(name, omit_empty_base_classes, child_indexes); case swift::TypeKind::GenericTypeParam: case swift::TypeKind::DependentMember: break; case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info) { ConstString const_name(name); const size_t num_sized_elements = cached_enum_info->GetNumElementsWithPayload(); for (size_t i = 0; i < num_sized_elements; ++i) { if (cached_enum_info->GetElementWithPayloadAtIndex(i)->name == const_name) { child_indexes.push_back(i); return child_indexes.size(); } } } } break; case swift::TypeKind::Tuple: { // For tuples only always look for the member by number first as a tuple // element can be named, yet still be accessed by the number... swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>(); uint32_t tuple_idx = StringConvert::ToUInt32(name, UINT32_MAX); if (tuple_idx != UINT32_MAX) { if (tuple_idx < tuple_type->getNumElements()) { child_indexes.push_back(tuple_idx); return child_indexes.size(); } else return 0; } // Otherwise, perform lookup by name. for (uint32_t tuple_idx : swift::range(tuple_type->getNumElements())) { if (tuple_type->getElement(tuple_idx).getName().str() == name) { child_indexes.push_back(tuple_idx); return child_indexes.size(); } } return 0; } case swift::TypeKind::Struct: case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: case swift::TypeKind::BoundGenericStruct: { auto nominal = swift_can_type->getAnyNominal(); auto stored_properties = GetStoredProperties(nominal); auto class_decl = llvm::dyn_cast<swift::ClassDecl>(nominal); // Search the stored properties. for (unsigned idx : indices(stored_properties)) { auto property = stored_properties[idx]; if (property->getBaseName().userFacingName() == name) { // We found it! // If we have a superclass, adjust the index accordingly. if (class_decl && class_decl->hasSuperclass()) ++idx; child_indexes.push_back(idx); return child_indexes.size(); } } // Search the superclass, if there is one. if (class_decl && class_decl->hasSuperclass()) { // Push index zero for the base class child_indexes.push_back(0); // Look in the superclass. 
swift::Type superclass_swift_type = swift_can_type->getSuperclass(); CompilerType superclass_type(GetASTContext(), superclass_swift_type.getPointer()); if (superclass_type.GetIndexOfChildMemberWithName( name, omit_empty_base_classes, child_indexes)) return child_indexes.size(); // We didn't find a stored property matching "name" in our // superclass, pop the superclass zero index that // we pushed on above. child_indexes.pop_back(); } } break; case swift::TypeKind::Protocol: case swift::TypeKind::ProtocolComposition: { ProtocolInfo protocol_info; if (!GetProtocolTypeInfo(CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info)) break; CompilerType compiler_type(GetASTContext(), GetSwiftType(type)); for (unsigned idx : swift::range(protocol_info.m_num_storage_words)) { CompilerType child_type; std::string child_name; std::tie(child_type, child_name) = GetExistentialTypeChild(GetASTContext(), compiler_type, protocol_info, idx); if (name == child_name) { child_indexes.push_back(idx); return child_indexes.size(); } } } break; case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: break; case swift::TypeKind::Module: case swift::TypeKind::Archetype: case swift::TypeKind::Function: case swift::TypeKind::GenericFunction: break; case swift::TypeKind::InOut: case swift::TypeKind::LValue: { CompilerType pointee_clang_type(GetNonReferenceType(type)); if (pointee_clang_type.IsAggregateType()) { return pointee_clang_type.GetIndexOfChildMemberWithName( name, omit_empty_base_classes, child_indexes); } } break; case swift::TypeKind::UnboundGeneric: break; case swift::TypeKind::TypeVariable: break; case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } } return 0; } // Get the index of the child of "clang_type" whose name matches. This function // doesn't descend into the children, but only looks one level deep and name // matches can include base class names. uint32_t SwiftASTContext::GetIndexOfChildWithName(void *type, const char *name, bool omit_empty_base_classes) { VALID_OR_RETURN(UINT32_MAX); std::vector<uint32_t> child_indexes; size_t num_child_indexes = GetIndexOfChildMemberWithName(type, name, omit_empty_base_classes, child_indexes); return num_child_indexes == 1 ? 
child_indexes.front() : UINT32_MAX; } size_t SwiftASTContext::GetNumTemplateArguments(void *type) { if (!type) return 0; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::UnboundGeneric: { swift::UnboundGenericType *unbound_generic_type = swift_can_type->castTo<swift::UnboundGenericType>(); auto *nominal_type_decl = unbound_generic_type->getDecl(); swift::GenericParamList *generic_param_list = nominal_type_decl->getGenericParams(); return generic_param_list->getParams().size(); } break; case swift::TypeKind::BoundGenericClass: case swift::TypeKind::BoundGenericStruct: case swift::TypeKind::BoundGenericEnum: { swift::BoundGenericType *bound_generic_type = swift_can_type->castTo<swift::BoundGenericType>(); return bound_generic_type->getGenericArgs().size(); } default: break; } return 0; } bool SwiftASTContext::GetSelectedEnumCase(const CompilerType &type, const DataExtractor &data, ConstString *name, bool *has_payload, CompilerType *payload, bool *is_indirect) { if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { default: break; case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = ast->GetCachedEnumInfo(swift_can_type.getPointer()); if (cached_enum_info) { auto enum_elem_info = cached_enum_info->GetElementFromData(data); if (enum_elem_info) { if (name) *name = enum_elem_info->name; if (has_payload) *has_payload = enum_elem_info->has_payload; if (payload) *payload = enum_elem_info->payload_type; if (is_indirect) *is_indirect = enum_elem_info->is_indirect; return true; } } } break; } } return false; } lldb::GenericKind SwiftASTContext::GetGenericArgumentKind(void *type, size_t idx) { if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (auto *unbound_generic_type = swift_can_type->getAs<swift::UnboundGenericType>()) return eUnboundGenericKindType; if (auto *bound_generic_type = swift_can_type->getAs<swift::BoundGenericType>()) if (idx < bound_generic_type->getGenericArgs().size()) return eBoundGenericKindType; } return eNullGenericKindType; } CompilerType SwiftASTContext::GetBoundGenericType(void *type, size_t idx) { VALID_OR_RETURN(CompilerType()); if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (auto *bound_generic_type = swift_can_type->getAs<swift::BoundGenericType>()) if (idx < bound_generic_type->getGenericArgs().size()) return CompilerType( GetASTContext(), bound_generic_type->getGenericArgs()[idx].getPointer()); } return CompilerType(); } CompilerType SwiftASTContext::GetUnboundGenericType(void *type, size_t idx) { VALID_OR_RETURN(CompilerType()); if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); if (auto *unbound_generic_type = swift_can_type->getAs<swift::UnboundGenericType>()) { auto *nominal_type_decl = unbound_generic_type->getDecl(); swift::GenericSignature *generic_sig = nominal_type_decl->getGenericSignature(); auto depTy = generic_sig->getGenericParams()[idx]; return CompilerType(GetASTContext(), nominal_type_decl->mapTypeIntoContext(depTy) ->castTo<swift::ArchetypeType>()); } } return CompilerType(); } CompilerType SwiftASTContext::GetGenericArgumentType(void *type, size_t idx) { VALID_OR_RETURN(CompilerType()); switch (GetGenericArgumentKind(type, idx)) { case 
eBoundGenericKindType: return GetBoundGenericType(type, idx); case eUnboundGenericKindType: return GetUnboundGenericType(type, idx); default: break; } return CompilerType(); } CompilerType SwiftASTContext::GetTypeForFormatters(void *type) { VALID_OR_RETURN(CompilerType()); if (type) { swift::Type swift_type(GetSwiftType(type)); return CompilerType(GetASTContext(), swift_type); } return CompilerType(); } LazyBool SwiftASTContext::ShouldPrintAsOneLiner(void *type, ValueObject *valobj) { if (type) { CompilerType can_compiler_type(GetCanonicalType(type)); if (IsImportedType(can_compiler_type, nullptr)) return eLazyBoolNo; } if (valobj) { if (valobj->IsBaseClass()) return eLazyBoolNo; if ((valobj->GetLanguageFlags() & LanguageFlags::eIsIndirectEnumCase) == LanguageFlags::eIsIndirectEnumCase) return eLazyBoolNo; } return eLazyBoolCalculate; } bool SwiftASTContext::IsMeaninglessWithoutDynamicResolution(void *type) { if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Archetype: return true; default: return false; } } return false; } //---------------------------------------------------------------------- // Dumping types //---------------------------------------------------------------------- #define DEPTH_INCREMENT 2 void SwiftASTContext::DumpValue( void *type, ExecutionContext *exe_ctx, Stream *s, lldb::Format format, const lldb_private::DataExtractor &data, lldb::offset_t data_byte_offset, size_t data_byte_size, uint32_t bitfield_bit_size, uint32_t bitfield_bit_offset, bool show_types, bool show_summary, bool verbose, uint32_t depth) {} bool SwiftASTContext::DumpTypeValue( void *type, Stream *s, lldb::Format format, const lldb_private::DataExtractor &data, lldb::offset_t byte_offset, size_t byte_size, uint32_t bitfield_bit_size, uint32_t bitfield_bit_offset, ExecutionContextScope *exe_scope, bool is_base_class) { VALID_OR_RETURN(false); if (!type) return false; swift::CanType swift_can_type(GetCanonicalSwiftType(type)); const swift::TypeKind type_kind = swift_can_type->getKind(); switch (type_kind) { case swift::TypeKind::Error: break; case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: // If we have a class that is in a variable then it is a pointer, // else if it is a base class, it has no value. if (is_base_class) break; // Fall through to case below case swift::TypeKind::BuiltinInteger: case swift::TypeKind::BuiltinFloat: case swift::TypeKind::BuiltinRawPointer: case swift::TypeKind::BuiltinNativeObject: case swift::TypeKind::BuiltinUnsafeValueBuffer: case swift::TypeKind::BuiltinUnknownObject: case swift::TypeKind::BuiltinBridgeObject: case swift::TypeKind::Archetype: case swift::TypeKind::Function: case swift::TypeKind::GenericFunction: case swift::TypeKind::LValue: { uint32_t item_count = 1; // A few formats, we might need to modify our size and count for depending // on how we are trying to display the value... 
switch (format) { default: case eFormatBoolean: case eFormatBinary: case eFormatComplex: case eFormatCString: // NULL terminated C strings case eFormatDecimal: case eFormatEnum: case eFormatHex: case eFormatHexUppercase: case eFormatFloat: case eFormatOctal: case eFormatOSType: case eFormatUnsigned: case eFormatPointer: case eFormatVectorOfChar: case eFormatVectorOfSInt8: case eFormatVectorOfUInt8: case eFormatVectorOfSInt16: case eFormatVectorOfUInt16: case eFormatVectorOfSInt32: case eFormatVectorOfUInt32: case eFormatVectorOfSInt64: case eFormatVectorOfUInt64: case eFormatVectorOfFloat32: case eFormatVectorOfFloat64: case eFormatVectorOfUInt128: break; case eFormatAddressInfo: if (byte_size == 0) { byte_size = exe_scope->CalculateTarget() ->GetArchitecture() .GetAddressByteSize(); item_count = 1; } break; case eFormatChar: case eFormatCharPrintable: case eFormatCharArray: case eFormatBytes: case eFormatBytesWithASCII: item_count = byte_size; byte_size = 1; break; case eFormatUnicode16: item_count = byte_size / 2; byte_size = 2; break; case eFormatUnicode32: item_count = byte_size / 4; byte_size = 4; break; } return DumpDataExtractor(data, s, byte_offset, format, byte_size, item_count, UINT32_MAX, LLDB_INVALID_ADDRESS, bitfield_bit_size, bitfield_bit_offset, exe_scope); } break; case swift::TypeKind::BuiltinVector: break; case swift::TypeKind::Tuple: break; case swift::TypeKind::UnmanagedStorage: case swift::TypeKind::UnownedStorage: case swift::TypeKind::WeakStorage: return CompilerType(GetASTContext(), swift_can_type->getReferenceStorageReferent()) .DumpTypeValue(s, format, data, byte_offset, byte_size, bitfield_bit_size, bitfield_bit_offset, exe_scope, is_base_class); case swift::TypeKind::Enum: case swift::TypeKind::BoundGenericEnum: { SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type); if (cached_enum_info) { auto enum_elem_info = cached_enum_info->GetElementFromData(data); if (enum_elem_info) s->Printf("%s", enum_elem_info->name.GetCString()); else { lldb::offset_t ptr = 0; if (data.GetByteSize()) s->Printf("<invalid> (0x%" PRIx8 ")", data.GetU8(&ptr)); else s->Printf("<empty>"); } return true; } else s->Printf("<unknown type>"); } break; case swift::TypeKind::Struct: case swift::TypeKind::Protocol: case swift::TypeKind::GenericTypeParam: case swift::TypeKind::DependentMember: return false; case swift::TypeKind::ExistentialMetatype: case swift::TypeKind::Metatype: { return DumpDataExtractor(data, s, byte_offset, eFormatPointer, byte_size, 1, UINT32_MAX, LLDB_INVALID_ADDRESS, bitfield_bit_size, bitfield_bit_offset, exe_scope); } break; case swift::TypeKind::Module: case swift::TypeKind::ProtocolComposition: case swift::TypeKind::UnboundGeneric: case swift::TypeKind::BoundGenericStruct: case swift::TypeKind::TypeVariable: case swift::TypeKind::DynamicSelf: case swift::TypeKind::SILBox: case swift::TypeKind::SILFunction: case swift::TypeKind::SILBlockStorage: case swift::TypeKind::InOut: case swift::TypeKind::Unresolved: break; case swift::TypeKind::Optional: case swift::TypeKind::NameAlias: case swift::TypeKind::Paren: case swift::TypeKind::Dictionary: case swift::TypeKind::ArraySlice: assert(false && "Not a canonical type"); break; } return 0; } bool SwiftASTContext::IsImportedType(const CompilerType &type, CompilerType *original_type) { bool success = false; if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { do { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); swift::NominalType *nominal_type = swift_can_type->getAs<swift::NominalType>(); 
if (!nominal_type) break; swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl(); if (nominal_type_decl && nominal_type_decl->hasClangNode()) { const clang::Decl *clang_decl = nominal_type_decl->getClangDecl(); if (!clang_decl) break; success = true; if (!original_type) break; if (const clang::ObjCInterfaceDecl *objc_interface_decl = llvm::dyn_cast<clang::ObjCInterfaceDecl>( clang_decl)) // ObjCInterfaceDecl is not a TypeDecl { *original_type = CompilerType(&objc_interface_decl->getASTContext(), clang::QualType::getFromOpaquePtr( objc_interface_decl->getTypeForDecl())); } else if (const clang::TypeDecl *type_decl = llvm::dyn_cast<clang::TypeDecl>(clang_decl)) { *original_type = CompilerType( &type_decl->getASTContext(), clang::QualType::getFromOpaquePtr(type_decl->getTypeForDecl())); } else // TODO: any more cases that we care about? { *original_type = CompilerType(); } } } while (0); } return success; } bool SwiftASTContext::IsImportedObjectiveCType(const CompilerType &type, CompilerType *original_type) { bool success = false; if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) { CompilerType local_original_type; if (IsImportedType(type, &local_original_type)) { if (local_original_type.IsValid()) { ClangASTContext *clang_ast = llvm::dyn_cast_or_null<ClangASTContext>( local_original_type.GetTypeSystem()); if (clang_ast && clang_ast->IsObjCObjectOrInterfaceType(local_original_type)) { if (original_type) *original_type = local_original_type; success = true; } } } } return success; } void SwiftASTContext::DumpSummary(void *type, ExecutionContext *exe_ctx, Stream *s, const lldb_private::DataExtractor &data, lldb::offset_t data_byte_offset, size_t data_byte_size) {} size_t SwiftASTContext::ConvertStringToFloatValue(void *type, const char *s, uint8_t *dst, size_t dst_size) { return 0; } void SwiftASTContext::DumpTypeDescription(void *type) { StreamFile s(stdout, false); DumpTypeDescription(type, &s); } void SwiftASTContext::DumpTypeDescription(void *type, Stream *s) { DumpTypeDescription(type, s, false, true); } void SwiftASTContext::DumpTypeDescription(void *type, bool print_help_if_available, bool print_extensions_if_available) { StreamFile s(stdout, false); DumpTypeDescription(type, &s, print_help_if_available, print_extensions_if_available); } static void PrintSwiftNominalType(swift::NominalTypeDecl *nominal_type_decl, Stream *s, bool print_help_if_available, bool print_extensions_if_available) { if (nominal_type_decl && s) { std::string buffer; llvm::raw_string_ostream ostream(buffer); const swift::PrintOptions &print_options( SwiftASTContext::GetUserVisibleTypePrintingOptions( print_help_if_available)); nominal_type_decl->print(ostream, print_options); ostream.flush(); if (buffer.empty() == false) s->Printf("%s\n", buffer.c_str()); if (print_extensions_if_available) { for (auto ext : nominal_type_decl->getExtensions()) { if (ext) { buffer.clear(); llvm::raw_string_ostream ext_ostream(buffer); ext->print(ext_ostream, print_options); ext_ostream.flush(); if (buffer.empty() == false) s->Printf("%s\n", buffer.c_str()); } } } } } void SwiftASTContext::DumpTypeDescription(void *type, Stream *s, bool print_help_if_available, bool print_extensions_if_available) { llvm::SmallVector<char, 1024> buf; llvm::raw_svector_ostream llvm_ostrm(buf); if (type) { swift::CanType swift_can_type(GetCanonicalSwiftType(type)); switch (swift_can_type->getKind()) { case swift::TypeKind::Module: { swift::ModuleType *module_type = swift_can_type->castTo<swift::ModuleType>(); 
swift::ModuleDecl *module = module_type->getModule(); llvm::SmallVector<swift::Decl *, 10> decls; module->getDisplayDecls(decls); for (swift::Decl *decl : decls) { swift::DeclKind kind = decl->getKind(); if (kind >= swift::DeclKind::First_TypeDecl && kind <= swift::DeclKind::Last_TypeDecl) { swift::TypeDecl *type_decl = llvm::dyn_cast_or_null<swift::TypeDecl>(decl); if (type_decl) { CompilerType clang_type(&module->getASTContext(), type_decl->getDeclaredInterfaceType().getPointer()); if (clang_type) { Flags clang_type_flags(clang_type.GetTypeInfo()); DumpTypeDescription(clang_type.GetOpaqueQualType(), s, print_help_if_available, print_extensions_if_available); } } } else if (kind == swift::DeclKind::Func || kind == swift::DeclKind::Var) { std::string buffer; llvm::raw_string_ostream stream(buffer); decl->print(stream, SwiftASTContext::GetUserVisibleTypePrintingOptions( print_help_if_available)); stream.flush(); s->Printf("%s\n", buffer.c_str()); } else if (kind == swift::DeclKind::Import) { swift::ImportDecl *import_decl = llvm::dyn_cast_or_null<swift::ImportDecl>(decl); if (import_decl) { switch (import_decl->getImportKind()) { case swift::ImportKind::Module: { swift::ModuleDecl *imported_module = import_decl->getModule(); if (imported_module) { s->Printf("import %s\n", imported_module->getName().get()); } } break; default: { for (swift::Decl *imported_decl : import_decl->getDecls()) { // all of the non-module things you can import should be a // ValueDecl if (swift::ValueDecl *imported_value_decl = llvm::dyn_cast_or_null<swift::ValueDecl>( imported_decl)) { if (swift::TypeBase *decl_type = imported_value_decl->getInterfaceType().getPointer()) { DumpTypeDescription(decl_type, s, print_help_if_available, print_extensions_if_available); } } } } break; } } } } break; } case swift::TypeKind::Metatype: { s->PutCString("metatype "); swift::MetatypeType *metatype_type = swift_can_type->castTo<swift::MetatypeType>(); DumpTypeDescription(metatype_type->getInstanceType().getPointer(), print_help_if_available, print_extensions_if_available); } break; case swift::TypeKind::UnboundGeneric: { swift::UnboundGenericType *unbound_generic_type = swift_can_type->castTo<swift::UnboundGenericType>(); auto nominal_type_decl = llvm::dyn_cast<swift::NominalTypeDecl>( unbound_generic_type->getDecl()); if (nominal_type_decl) { PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available, print_extensions_if_available); } } break; case swift::TypeKind::GenericFunction: case swift::TypeKind::Function: { swift::AnyFunctionType *any_function_type = swift_can_type->castTo<swift::AnyFunctionType>(); std::string buffer; llvm::raw_string_ostream ostream(buffer); const swift::PrintOptions &print_options( SwiftASTContext::GetUserVisibleTypePrintingOptions( print_help_if_available)); any_function_type->print(ostream, print_options); ostream.flush(); if (buffer.empty() == false) s->Printf("%s\n", buffer.c_str()); } break; case swift::TypeKind::Tuple: { swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>(); std::string buffer; llvm::raw_string_ostream ostream(buffer); const swift::PrintOptions &print_options( SwiftASTContext::GetUserVisibleTypePrintingOptions( print_help_if_available)); tuple_type->print(ostream, print_options); ostream.flush(); if (buffer.empty() == false) s->Printf("%s\n", buffer.c_str()); } break; case swift::TypeKind::BoundGenericClass: case swift::TypeKind::BoundGenericEnum: case swift::TypeKind::BoundGenericStruct: { swift::BoundGenericType *bound_generic_type = 
swift_can_type->castTo<swift::BoundGenericType>(); swift::NominalTypeDecl *nominal_type_decl = bound_generic_type->getDecl(); PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available, print_extensions_if_available); } break; case swift::TypeKind::BuiltinInteger: { swift::BuiltinIntegerType *builtin_integer_type = swift_can_type->castTo<swift::BuiltinIntegerType>(); s->Printf("builtin integer type of width %u bits\n", builtin_integer_type->getWidth().getGreatestWidth()); break; } case swift::TypeKind::BuiltinFloat: { swift::BuiltinFloatType *builtin_float_type = swift_can_type->castTo<swift::BuiltinFloatType>(); s->Printf("builtin floating-point type of width %u bits\n", builtin_float_type->getBitWidth()); break; } case swift::TypeKind::ProtocolComposition: { swift::ProtocolCompositionType *protocol_composition_type = swift_can_type->castTo<swift::ProtocolCompositionType>(); std::string buffer; llvm::raw_string_ostream ostream(buffer); const swift::PrintOptions &print_options( SwiftASTContext::GetUserVisibleTypePrintingOptions( print_help_if_available)); protocol_composition_type->print(ostream, print_options); ostream.flush(); if (buffer.empty() == false) s->Printf("%s\n", buffer.c_str()); break; } default: { swift::NominalType *nominal_type = llvm::dyn_cast_or_null<swift::NominalType>( swift_can_type.getPointer()); if (nominal_type) { swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl(); PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available, print_extensions_if_available); } } break; } if (buf.size() > 0) { s->Write(buf.data(), buf.size()); } } } TypeSP SwiftASTContext::GetCachedType(const ConstString &mangled) { TypeSP type_sp; if (m_swift_type_map.Lookup(mangled.GetCString(), type_sp)) return type_sp; else return TypeSP(); } void SwiftASTContext::SetCachedType(const ConstString &mangled, const TypeSP &type_sp) { m_swift_type_map.Insert(mangled.GetCString(), type_sp); } DWARFASTParser *SwiftASTContext::GetDWARFParser() { if (!m_dwarf_ast_parser_ap) m_dwarf_ast_parser_ap.reset(new DWARFASTParserSwift(*this)); return m_dwarf_ast_parser_ap.get(); } std::vector<lldb::DataBufferSP> & SwiftASTContext::GetASTVectorForModule(const Module *module) { return m_ast_file_data_map[const_cast<Module *>(module)]; } SwiftASTContextForExpressions::SwiftASTContextForExpressions(Target &target) : SwiftASTContext(target.GetArchitecture().GetTriple().getTriple().c_str(), &target), m_persistent_state_up(new SwiftPersistentExpressionState) {} UserExpression *SwiftASTContextForExpressions::GetUserExpression( llvm::StringRef expr, llvm::StringRef prefix, lldb::LanguageType language, Expression::ResultType desired_type, const EvaluateExpressionOptions &options) { TargetSP target_sp = m_target_wp.lock(); if (!target_sp) return nullptr; return new SwiftUserExpression(*target_sp.get(), expr, prefix, language, desired_type, options); } PersistentExpressionState * SwiftASTContextForExpressions::GetPersistentExpressionState() { return m_persistent_state_up.get(); }
1
16818
Why not simply `paths = search_path_opts.LibrarySearchPaths`?
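Since the patch this comment refers to is not shown in this record, the following is only a minimal sketch of the suggestion: replace a per-element copy with a single vector assignment. The `SearchPathOptions` stand-in below is hypothetical; only the `LibrarySearchPaths` member name comes from the comment itself (in the real code this would presumably be `swift::SearchPathOptions`).

#include <string>
#include <vector>

// Hypothetical stand-in for swift::SearchPathOptions; only the
// LibrarySearchPaths member name is taken from the review comment above.
struct SearchPathOptions {
  std::vector<std::string> LibrarySearchPaths;
};

int main() {
  SearchPathOptions search_path_opts;
  search_path_opts.LibrarySearchPaths = {"/usr/lib/swift", "/opt/swift/lib"};

  // The reviewer's suggestion: instead of looping over the entries and
  // appending them one by one, copy the whole vector in a single assignment.
  std::vector<std::string> paths = search_path_opts.LibrarySearchPaths;

  return paths.size() == 2 ? 0 : 1;
}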
apple-swift-lldb
cpp
@@ -610,7 +610,7 @@ define(["globalize", "listView", "layoutManager", "userSettings", "focusManager" } if (item) { - return item.Name; + return globalize.translate(item.Name); } if ("Movie" === params.type) {
1
define(["globalize", "listView", "layoutManager", "userSettings", "focusManager", "cardBuilder", "loading", "connectionManager", "alphaNumericShortcuts", "scroller", "playbackManager", "alphaPicker", "emby-itemscontainer", "emby-scroller"], function (globalize, listView, layoutManager, userSettings, focusManager, cardBuilder, loading, connectionManager, AlphaNumericShortcuts, scroller, playbackManager, alphaPicker) { "use strict"; function getInitialLiveTvQuery(instance, params) { var query = { UserId: connectionManager.getApiClient(params.serverId).getCurrentUserId(), StartIndex: 0, Fields: "ChannelInfo,PrimaryImageAspectRatio", Limit: 300 }; if ("Recordings" === params.type) { query.IsInProgress = false; } else { query.HasAired = false; } if (params.genreId) { query.GenreIds = params.genreId; } if ("true" === params.IsMovie) { query.IsMovie = true; } else if ("false" === params.IsMovie) { query.IsMovie = false; } if ("true" === params.IsSeries) { query.IsSeries = true; } else if ("false" === params.IsSeries) { query.IsSeries = false; } if ("true" === params.IsNews) { query.IsNews = true; } else if ("false" === params.IsNews) { query.IsNews = false; } if ("true" === params.IsSports) { query.IsSports = true; } else if ("false" === params.IsSports) { query.IsSports = false; } if ("true" === params.IsKids) { query.IsKids = true; } else if ("false" === params.IsKids) { query.IsKids = false; } if ("true" === params.IsAiring) { query.IsAiring = true; } else if ("false" === params.IsAiring) { query.IsAiring = false; } return modifyQueryWithFilters(instance, query); } function modifyQueryWithFilters(instance, query) { var sortValues = instance.getSortValues(); if (!query.SortBy) { query.SortBy = sortValues.sortBy; query.SortOrder = sortValues.sortOrder; } query.Fields = query.Fields ? query.Fields + ",PrimaryImageAspectRatio" : "PrimaryImageAspectRatio"; query.ImageTypeLimit = 1; var hasFilters; var queryFilters = []; var filters = instance.getFilters(); if (filters.IsPlayed) { queryFilters.push("IsPlayed"); hasFilters = true; } if (filters.IsUnplayed) { queryFilters.push("IsUnplayed"); hasFilters = true; } if (filters.IsFavorite) { queryFilters.push("IsFavorite"); hasFilters = true; } if (filters.IsResumable) { queryFilters.push("IsResumable"); hasFilters = true; } if (filters.VideoTypes) { hasFilters = true; query.VideoTypes = filters.VideoTypes; } if (filters.GenreIds) { hasFilters = true; query.GenreIds = filters.GenreIds; } if (filters.Is4K) { query.Is4K = true; hasFilters = true; } if (filters.IsHD) { query.IsHD = true; hasFilters = true; } if (filters.IsSD) { query.IsHD = false; hasFilters = true; } if (filters.Is3D) { query.Is3D = true; hasFilters = true; } if (filters.HasSubtitles) { query.HasSubtitles = true; hasFilters = true; } if (filters.HasTrailer) { query.HasTrailer = true; hasFilters = true; } if (filters.HasSpecialFeature) { query.HasSpecialFeature = true; hasFilters = true; } if (filters.HasThemeSong) { query.HasThemeSong = true; hasFilters = true; } if (filters.HasThemeVideo) { query.HasThemeVideo = true; hasFilters = true; } query.Filters = queryFilters.length ? 
queryFilters.join(",") : null; instance.setFilterStatus(hasFilters); if (instance.alphaPicker) { query.NameStartsWithOrGreater = instance.alphaPicker.value(); } return query; } function setSortButtonIcon(btnSortIcon, icon) { btnSortIcon.classList.remove("arrow_downward"); btnSortIcon.classList.remove("arrow_upward"); btnSortIcon.classList.add(icon); } function updateSortText(instance) { var btnSortText = instance.btnSortText; if (btnSortText) { var options = instance.getSortMenuOptions(); var values = instance.getSortValues(); var sortBy = values.sortBy; for (var i = 0, length = options.length; i < length; i++) { if (sortBy === options[i].value) { btnSortText.innerHTML = globalize.translate("SortByValue", options[i].name); break; } } var btnSortIcon = instance.btnSortIcon; if (btnSortIcon) { setSortButtonIcon(btnSortIcon, "Descending" === values.sortOrder ? "arrow_downward" : "arrow_upward"); } } } function updateItemsContainerForViewType(instance) { if ("list" === instance.getViewSettings().imageType) { instance.itemsContainer.classList.remove("vertical-wrap"); instance.itemsContainer.classList.add("vertical-list"); } else { instance.itemsContainer.classList.add("vertical-wrap"); instance.itemsContainer.classList.remove("vertical-list"); } } function updateAlphaPickerState(instance, numItems) { if (instance.alphaPicker) { var alphaPicker = instance.alphaPickerElement; if (alphaPicker) { var values = instance.getSortValues(); if (null == numItems) { numItems = 100; } if ("SortName" === values.sortBy && "Ascending" === values.sortOrder && numItems > 40) { alphaPicker.classList.remove("hide"); instance.itemsContainer.parentNode.classList.add("padded-right-withalphapicker"); } else { alphaPicker.classList.add("hide"); instance.itemsContainer.parentNode.classList.remove("padded-right-withalphapicker"); } } } } function getItems(instance, params, item, sortBy, startIndex, limit) { var apiClient = connectionManager.getApiClient(params.serverId); instance.queryRecursive = false; if ("Recordings" === params.type) { return apiClient.getLiveTvRecordings(getInitialLiveTvQuery(instance, params)); } if ("Programs" === params.type) { if ("true" === params.IsAiring) { return apiClient.getLiveTvRecommendedPrograms(getInitialLiveTvQuery(instance, params)); } return apiClient.getLiveTvPrograms(getInitialLiveTvQuery(instance, params)); } if ("nextup" === params.type) { return apiClient.getNextUpEpisodes(modifyQueryWithFilters(instance, { Limit: limit, Fields: "PrimaryImageAspectRatio,SeriesInfo,DateCreated,BasicSyncInfo", UserId: apiClient.getCurrentUserId(), ImageTypeLimit: 1, EnableImageTypes: "Primary,Backdrop,Thumb", EnableTotalRecordCount: false, SortBy: sortBy })); } if (!item) { instance.queryRecursive = true; var method = "getItems"; if ("MusicArtist" === params.type) { method = "getArtists"; } else if ("Person" === params.type) { method = "getPeople"; } return apiClient[method](apiClient.getCurrentUserId(), modifyQueryWithFilters(instance, { StartIndex: startIndex, Limit: limit, Fields: "PrimaryImageAspectRatio,SortName", ImageTypeLimit: 1, IncludeItemTypes: "MusicArtist" === params.type || "Person" === params.type ? 
null : params.type, Recursive: true, IsFavorite: "true" === params.IsFavorite || null, ArtistIds: params.artistId || null, SortBy: sortBy })); } if ("Genre" === item.Type || "MusicGenre" === item.Type || "Studio" === item.Type || "Person" === item.Type) { instance.queryRecursive = true; var query = { StartIndex: startIndex, Limit: limit, Fields: "PrimaryImageAspectRatio,SortName", Recursive: true, parentId: params.parentId, SortBy: sortBy }; if ("Studio" === item.Type) { query.StudioIds = item.Id; } else if ("Genre" === item.Type || "MusicGenre" === item.Type) { query.GenreIds = item.Id; } else if ("Person" === item.Type) { query.PersonIds = item.Id; } if ("MusicGenre" === item.Type) { query.IncludeItemTypes = "MusicAlbum"; } else if ("GameGenre" === item.Type) { query.IncludeItemTypes = "Game"; } else if ("movies" === item.CollectionType) { query.IncludeItemTypes = "Movie"; } else if ("tvshows" === item.CollectionType) { query.IncludeItemTypes = "Series"; } else if ("Genre" === item.Type) { query.IncludeItemTypes = "Movie,Series,Video"; } else if ("Person" === item.Type) { query.IncludeItemTypes = params.type; } return apiClient.getItems(apiClient.getCurrentUserId(), modifyQueryWithFilters(instance, query)); } return apiClient.getItems(apiClient.getCurrentUserId(), modifyQueryWithFilters(instance, { StartIndex: startIndex, Limit: limit, Fields: "PrimaryImageAspectRatio,SortName", ImageTypeLimit: 1, ParentId: item.Id, SortBy: sortBy })); } function getItem(params) { if ("Recordings" === params.type || "Programs" === params.type || "nextup" === params.type) { return Promise.resolve(null); } var apiClient = connectionManager.getApiClient(params.serverId); var itemId = params.genreId || params.musicGenreId || params.studioId || params.personId || params.parentId; if (itemId) { return apiClient.getItem(apiClient.getCurrentUserId(), itemId); } return Promise.resolve(null); } function showViewSettingsMenu() { var instance = this; require(["viewSettings"], function (ViewSettings) { new ViewSettings().show({ settingsKey: instance.getSettingsKey(), settings: instance.getViewSettings(), visibleSettings: instance.getVisibleViewSettings() }).then(function () { updateItemsContainerForViewType(instance); instance.itemsContainer.refreshItems(); }); }); } function showFilterMenu() { var instance = this; require(["filterMenu"], function (FilterMenu) { new FilterMenu().show({ settingsKey: instance.getSettingsKey(), settings: instance.getFilters(), visibleSettings: instance.getVisibleFilters(), onChange: instance.itemsContainer.refreshItems.bind(instance.itemsContainer), parentId: instance.params.parentId, itemTypes: instance.getItemTypes(), serverId: instance.params.serverId, filterMenuOptions: instance.getFilterMenuOptions() }).then(function () { instance.itemsContainer.refreshItems(); }); }); } function showSortMenu() { var instance = this; require(["sortMenu"], function (SortMenu) { new SortMenu().show({ settingsKey: instance.getSettingsKey(), settings: instance.getSortValues(), onChange: instance.itemsContainer.refreshItems.bind(instance.itemsContainer), serverId: instance.params.serverId, sortOptions: instance.getSortMenuOptions() }).then(function () { updateSortText(instance); updateAlphaPickerState(instance); instance.itemsContainer.refreshItems(); }); }); } function onNewItemClick() { var instance = this; require(["playlistEditor"], function (playlistEditor) { new playlistEditor().show({ items: [], serverId: instance.params.serverId }); }); } function hideOrShowAll(elems, hide) { for (var i = 0, 
length = elems.length; i < length; i++) { if (hide) { elems[i].classList.add("hide"); } else { elems[i].classList.remove("hide"); } } } function bindAll(elems, eventName, fn) { for (var i = 0, length = elems.length; i < length; i++) { elems[i].addEventListener(eventName, fn); } } function ItemsView(view, params) { function fetchData() { return getItems(self, params, self.currentItem).then(function (result) { if (null == self.totalItemCount) { self.totalItemCount = result.Items ? result.Items.length : result.length; } updateAlphaPickerState(self, self.totalItemCount); return result; }); } function getItemsHtml(items) { var settings = self.getViewSettings(); if ("list" === settings.imageType) { return listView.getListViewHtml({ items: items }); } var shape; var preferThumb; var preferDisc; var preferLogo; var defaultShape; var item = self.currentItem; var lines = settings.showTitle ? 2 : 0; if ("banner" === settings.imageType) { shape = "banner"; } else if ("disc" === settings.imageType) { shape = "square"; preferDisc = true; } else if ("logo" === settings.imageType) { shape = "backdrop"; preferLogo = true; } else if ("thumb" === settings.imageType) { shape = "backdrop"; preferThumb = true; } else if ("nextup" === params.type) { shape = "backdrop"; preferThumb = "thumb" === settings.imageType; } else if ("Programs" === params.type || "Recordings" === params.type) { shape = "true" === params.IsMovie ? "portrait" : "autoVertical"; preferThumb = "true" !== params.IsMovie ? "auto" : false; defaultShape = "true" === params.IsMovie ? "portrait" : "backdrop"; } else { shape = "autoVertical"; } var posterOptions = { shape: shape, showTitle: settings.showTitle, showYear: settings.showTitle, centerText: true, coverImage: true, preferThumb: preferThumb, preferDisc: preferDisc, preferLogo: preferLogo, overlayPlayButton: false, overlayMoreButton: true, overlayText: !settings.showTitle, defaultShape: defaultShape, action: "Audio" === params.type ? "playallfromhere" : null }; if ("nextup" === params.type) { posterOptions.showParentTitle = settings.showTitle; } else if ("Person" === params.type) { posterOptions.showYear = false; posterOptions.showParentTitle = false; lines = 1; } else if ("Audio" === params.type) { posterOptions.showParentTitle = settings.showTitle; } else if ("MusicAlbum" === params.type) { posterOptions.showParentTitle = settings.showTitle; } else if ("Episode" === params.type) { posterOptions.showParentTitle = settings.showTitle; } else if ("MusicArtist" === params.type) { posterOptions.showYear = false; lines = 1; } else if ("Programs" === params.type) { lines = settings.showTitle ? 
1 : 0; var showParentTitle = settings.showTitle && "true" !== params.IsMovie; if (showParentTitle) { lines++; } var showAirTime = settings.showTitle && "Recordings" !== params.type; if (showAirTime) { lines++; } var showYear = settings.showTitle && "true" === params.IsMovie && "Recordings" === params.type; if (showYear) { lines++; } posterOptions = Object.assign(posterOptions, { inheritThumb: "Recordings" === params.type, context: "livetv", showParentTitle: showParentTitle, showAirTime: showAirTime, showAirDateTime: showAirTime, overlayPlayButton: false, overlayMoreButton: true, showYear: showYear, coverImage: true }); } else { posterOptions.showParentTitle = settings.showTitle; } posterOptions.lines = lines; posterOptions.items = items; if (item && "folders" === item.CollectionType) { posterOptions.context = "folders"; } return cardBuilder.getCardsHtml(posterOptions); } function initAlphaPicker() { self.scroller = view.querySelector(".scrollFrameY"); var alphaPickerElement = self.alphaPickerElement; alphaPickerElement.classList.add("alphaPicker-fixed-right"); alphaPickerElement.classList.add("focuscontainer-right"); self.itemsContainer.parentNode.classList.add("padded-right-withalphapicker"); self.alphaPicker = new alphaPicker({ element: alphaPickerElement, itemsContainer: layoutManager.tv ? self.itemsContainer : null, itemClass: "card", valueChangeEvent: layoutManager.tv ? null : "click" }); self.alphaPicker.on("alphavaluechanged", onAlphaPickerValueChanged); } function onAlphaPickerValueChanged() { self.alphaPicker.value(); self.itemsContainer.refreshItems(); } function setTitle(item) { Emby.Page.setTitle(getTitle(item) || ""); if (item && "playlists" === item.CollectionType) { hideOrShowAll(view.querySelectorAll(".btnNewItem"), false); } else { hideOrShowAll(view.querySelectorAll(".btnNewItem"), true); } } function getTitle(item) { if ("Recordings" === params.type) { return globalize.translate("Recordings"); } if ("Programs" === params.type) { if ("true" === params.IsMovie) { return globalize.translate("Movies"); } if ("true" === params.IsSports) { return globalize.translate("Sports"); } if ("true" === params.IsKids) { return globalize.translate("HeaderForKids"); } if ("true" === params.IsAiring) { return globalize.translate("HeaderOnNow"); } if ("true" === params.IsSeries) { return globalize.translate("Shows"); } if ("true" === params.IsNews) { return globalize.translate("News"); } return globalize.translate("Programs"); } if ("nextup" === params.type) { return globalize.translate("NextUp"); } if ("favoritemovies" === params.type) { return globalize.translate("FavoriteMovies"); } if (item) { return item.Name; } if ("Movie" === params.type) { return globalize.translate("Movies"); } if ("Series" === params.type) { return globalize.translate("Shows"); } if ("Season" === params.type) { return globalize.translate("Seasons"); } if ("Episode" === params.type) { return globalize.translate("Episodes"); } if ("MusicArtist" === params.type) { return globalize.translate("Artists"); } if ("MusicAlbum" === params.type) { return globalize.translate("Albums"); } if ("Audio" === params.type) { return globalize.translate("Songs"); } if ("Video" === params.type) { return globalize.translate("Videos"); } return void 0; } function play() { var currentItem = self.currentItem; if (currentItem && !self.hasFilters) { playbackManager.play({ items: [currentItem] }); } else { getItems(self, self.params, currentItem, null, null, 300).then(function (result) { playbackManager.play({ items: result.Items }); }); } } 
function queue() { var currentItem = self.currentItem; if (currentItem && !self.hasFilters) { playbackManager.queue({ items: [currentItem] }); } else { getItems(self, self.params, currentItem, null, null, 300).then(function (result) { playbackManager.queue({ items: result.Items }); }); } } function shuffle() { var currentItem = self.currentItem; if (currentItem && !self.hasFilters) { playbackManager.shuffle(currentItem); } else { getItems(self, self.params, currentItem, "Random", null, 300).then(function (result) { playbackManager.play({ items: result.Items }); }); } } var self = this; self.params = params; this.itemsContainer = view.querySelector(".itemsContainer"); if (params.parentId) { this.itemsContainer.setAttribute("data-parentid", params.parentId); } else if ("nextup" === params.type) { this.itemsContainer.setAttribute("data-monitor", "videoplayback"); } else if ("favoritemovies" === params.type) { this.itemsContainer.setAttribute("data-monitor", "markfavorite"); } else if ("Programs" === params.type) { this.itemsContainer.setAttribute("data-refreshinterval", "300000"); } var i; var length; var btnViewSettings = view.querySelectorAll(".btnViewSettings"); for (i = 0, length = btnViewSettings.length; i < length; i++) { btnViewSettings[i].addEventListener("click", showViewSettingsMenu.bind(this)); } var filterButtons = view.querySelectorAll(".btnFilter"); this.filterButtons = filterButtons; var hasVisibleFilters = this.getVisibleFilters().length; for (i = 0, length = filterButtons.length; i < length; i++) { var btnFilter = filterButtons[i]; btnFilter.addEventListener("click", showFilterMenu.bind(this)); if (hasVisibleFilters) { btnFilter.classList.remove("hide"); } else { btnFilter.classList.add("hide"); } } var sortButtons = view.querySelectorAll(".btnSort"); for (this.sortButtons = sortButtons, i = 0, length = sortButtons.length; i < length; i++) { var sortButton = sortButtons[i]; sortButton.addEventListener("click", showSortMenu.bind(this)); if ("nextup" !== params.type) { sortButton.classList.remove("hide"); } } this.btnSortText = view.querySelector(".btnSortText"); this.btnSortIcon = view.querySelector(".btnSortIcon"); bindAll(view.querySelectorAll(".btnNewItem"), "click", onNewItemClick.bind(this)); this.alphaPickerElement = view.querySelector(".alphaPicker"); self.itemsContainer.fetchData = fetchData; self.itemsContainer.getItemsHtml = getItemsHtml; view.addEventListener("viewshow", function (e) { var isRestored = e.detail.isRestored; if (!isRestored) { loading.show(); updateSortText(self); updateItemsContainerForViewType(self); } setTitle(null); getItem(params).then(function (item) { setTitle(item); self.currentItem = item; var refresh = !isRestored; self.itemsContainer.resume({ refresh: refresh }).then(function () { loading.hide(); if (refresh) { focusManager.autoFocus(self.itemsContainer); } }); if (!isRestored && item && "PhotoAlbum" !== item.Type) { initAlphaPicker(); } var itemType = item ? 
item.Type : null; if ("MusicGenre" === itemType || "Programs" !== params.type && "Channel" !== itemType) { hideOrShowAll(view.querySelectorAll(".btnPlay"), false); } else { hideOrShowAll(view.querySelectorAll(".btnPlay"), true); } if ("MusicGenre" === itemType || "Programs" !== params.type && "nextup" !== params.type && "Channel" !== itemType) { hideOrShowAll(view.querySelectorAll(".btnShuffle"), false); } else { hideOrShowAll(view.querySelectorAll(".btnShuffle"), true); } if (item && playbackManager.canQueue(item)) { hideOrShowAll(view.querySelectorAll(".btnQueue"), false); } else { hideOrShowAll(view.querySelectorAll(".btnQueue"), true); } }); if (!isRestored) { bindAll(view.querySelectorAll(".btnPlay"), "click", play); bindAll(view.querySelectorAll(".btnQueue"), "click", queue); bindAll(view.querySelectorAll(".btnShuffle"), "click", shuffle); } this.alphaNumericShortcuts = new AlphaNumericShortcuts({ itemsContainer: self.itemsContainer }); }); view.addEventListener("viewhide", function (e) { var itemsContainer = self.itemsContainer; if (itemsContainer) { itemsContainer.pause(); } var alphaNumericShortcuts = self.alphaNumericShortcuts; if (alphaNumericShortcuts) { alphaNumericShortcuts.destroy(); self.alphaNumericShortcuts = null; } }); view.addEventListener("viewdestroy", function () { if (self.listController) { self.listController.destroy(); } if (self.alphaPicker) { self.alphaPicker.off("alphavaluechanged", onAlphaPickerValueChanged); self.alphaPicker.destroy(); } self.currentItem = null; self.scroller = null; self.itemsContainer = null; self.filterButtons = null; self.sortButtons = null; self.btnSortText = null; self.btnSortIcon = null; self.alphaPickerElement = null; }); } ItemsView.prototype.getFilters = function () { var basekey = this.getSettingsKey(); return { IsPlayed: "true" === userSettings.getFilter(basekey + "-filter-IsPlayed"), IsUnplayed: "true" === userSettings.getFilter(basekey + "-filter-IsUnplayed"), IsFavorite: "true" === userSettings.getFilter(basekey + "-filter-IsFavorite"), IsResumable: "true" === userSettings.getFilter(basekey + "-filter-IsResumable"), Is4K: "true" === userSettings.getFilter(basekey + "-filter-Is4K"), IsHD: "true" === userSettings.getFilter(basekey + "-filter-IsHD"), IsSD: "true" === userSettings.getFilter(basekey + "-filter-IsSD"), Is3D: "true" === userSettings.getFilter(basekey + "-filter-Is3D"), VideoTypes: userSettings.getFilter(basekey + "-filter-VideoTypes"), SeriesStatus: userSettings.getFilter(basekey + "-filter-SeriesStatus"), HasSubtitles: userSettings.getFilter(basekey + "-filter-HasSubtitles"), HasTrailer: userSettings.getFilter(basekey + "-filter-HasTrailer"), HasSpecialFeature: userSettings.getFilter(basekey + "-filter-HasSpecialFeature"), HasThemeSong: userSettings.getFilter(basekey + "-filter-HasThemeSong"), HasThemeVideo: userSettings.getFilter(basekey + "-filter-HasThemeVideo"), GenreIds: userSettings.getFilter(basekey + "-filter-GenreIds") }; }; ItemsView.prototype.getSortValues = function () { var basekey = this.getSettingsKey(); return { sortBy: userSettings.getFilter(basekey + "-sortby") || this.getDefaultSortBy(), sortOrder: "Descending" === userSettings.getFilter(basekey + "-sortorder") ? 
"Descending" : "Ascending" }; }; ItemsView.prototype.getDefaultSortBy = function () { var params = this.params; var sortNameOption = this.getNameSortOption(params); if (params.type) { return sortNameOption.value; } return "IsFolder," + sortNameOption.value; }; ItemsView.prototype.getSortMenuOptions = function () { var sortBy = []; var params = this.params; if ("Programs" === params.type) { sortBy.push({ name: globalize.translate("AirDate"), value: "StartDate,SortName" }); } var option = this.getNameSortOption(params); if (option) { sortBy.push(option); } option = this.getCommunityRatingSortOption(); if (option) { sortBy.push(option); } option = this.getCriticRatingSortOption(); if (option) { sortBy.push(option); } if ("Programs" !== params.type) { sortBy.push({ name: globalize.translate("DateAdded"), value: "DateCreated,SortName" }); } option = this.getDatePlayedSortOption(); if (option) { sortBy.push(option); } if (!params.type) { option = this.getNameSortOption(params); sortBy.push({ name: globalize.translate("Folders"), value: "IsFolder," + option.value }); } sortBy.push({ name: globalize.translate("ParentalRating"), value: "OfficialRating,SortName" }); option = this.getPlayCountSortOption(); if (option) { sortBy.push(option); } sortBy.push({ name: globalize.translate("ReleaseDate"), value: "ProductionYear,PremiereDate,SortName" }); sortBy.push({ name: globalize.translate("Runtime"), value: "Runtime,SortName" }); return sortBy; }; ItemsView.prototype.getNameSortOption = function (params) { if ("Episode" === params.type) { return { name: globalize.translate("Name"), value: "SeriesName,SortName" }; } return { name: globalize.translate("Name"), value: "SortName" }; }; ItemsView.prototype.getPlayCountSortOption = function () { if ("Programs" === this.params.type) { return null; } return { name: globalize.translate("PlayCount"), value: "PlayCount,SortName" }; }; ItemsView.prototype.getDatePlayedSortOption = function () { if ("Programs" === this.params.type) { return null; } return { name: globalize.translate("DatePlayed"), value: "DatePlayed,SortName" }; }; ItemsView.prototype.getCriticRatingSortOption = function () { if ("Programs" === this.params.type) { return null; } return { name: globalize.translate("CriticRating"), value: "CriticRating,SortName" }; }; ItemsView.prototype.getCommunityRatingSortOption = function () { return { name: globalize.translate("CommunityRating"), value: "CommunityRating,SortName" }; }; ItemsView.prototype.getVisibleFilters = function () { var filters = []; var params = this.params; if (!("nextup" === params.type)) { if ("Programs" === params.type) { filters.push("Genres"); } else { params.type; filters.push("IsUnplayed"); filters.push("IsPlayed"); if (!params.IsFavorite) { filters.push("IsFavorite"); } filters.push("IsResumable"); filters.push("VideoType"); filters.push("HasSubtitles"); filters.push("HasTrailer"); filters.push("HasSpecialFeature"); filters.push("HasThemeSong"); filters.push("HasThemeVideo"); } } return filters; }; ItemsView.prototype.setFilterStatus = function (hasFilters) { this.hasFilters = hasFilters; var filterButtons = this.filterButtons; if (filterButtons.length) { for (var i = 0, length = filterButtons.length; i < length; i++) { var btnFilter = filterButtons[i]; var bubble = btnFilter.querySelector(".filterButtonBubble"); if (!bubble) { if (!hasFilters) { continue; } btnFilter.insertAdjacentHTML("afterbegin", '<div class="filterButtonBubble">!</div>'); btnFilter.classList.add("btnFilterWithBubble"); bubble = 
btnFilter.querySelector(".filterButtonBubble"); } if (hasFilters) { bubble.classList.remove("hide"); } else { bubble.classList.add("hide"); } } } }; ItemsView.prototype.getFilterMenuOptions = function () { var params = this.params; return { IsAiring: params.IsAiring, IsMovie: params.IsMovie, IsSports: params.IsSports, IsKids: params.IsKids, IsNews: params.IsNews, IsSeries: params.IsSeries, Recursive: this.queryRecursive }; }; ItemsView.prototype.getVisibleViewSettings = function () { var item = (this.params, this.currentItem); var fields = ["showTitle"]; if (!item || "PhotoAlbum" !== item.Type && "ChannelFolderItem" !== item.Type) { fields.push("imageType"); } fields.push("viewType"); return fields; }; ItemsView.prototype.getViewSettings = function () { var basekey = this.getSettingsKey(); var params = this.params; var item = this.currentItem; var showTitle = userSettings.get(basekey + "-showTitle"); if ("true" === showTitle) { showTitle = true; } else if ("false" === showTitle) { showTitle = false; } else if ("Programs" === params.type || "Recordings" === params.type || "Person" === params.type || "nextup" === params.type || "Audio" === params.type || "MusicAlbum" === params.type || "MusicArtist" === params.type) { showTitle = true; } else if (item && "PhotoAlbum" !== item.Type) { showTitle = true; } var imageType = userSettings.get(basekey + "-imageType"); if (!imageType && "nextup" === params.type) { imageType = "thumb"; } return { showTitle: showTitle, showYear: "false" !== userSettings.get(basekey + "-showYear"), imageType: imageType || "primary", viewType: userSettings.get(basekey + "-viewType") || "images" }; }; ItemsView.prototype.getItemTypes = function () { var params = this.params; if ("nextup" === params.type) { return ["Episode"]; } if ("Programs" === params.type) { return ["Program"]; } return []; }; ItemsView.prototype.getSettingsKey = function () { var values = []; values.push("items"); var params = this.params; if (params.type) { values.push(params.type); } else if (params.parentId) { values.push(params.parentId); } if (params.IsAiring) { values.push("IsAiring"); } if (params.IsMovie) { values.push("IsMovie"); } if (params.IsKids) { values.push("IsKids"); } if (params.IsSports) { values.push("IsSports"); } if (params.IsNews) { values.push("IsNews"); } if (params.IsSeries) { values.push("IsSeries"); } if (params.IsFavorite) { values.push("IsFavorite"); } if (params.genreId) { values.push("Genre"); } if (params.musicGenreId) { values.push("MusicGenre"); } if (params.studioId) { values.push("Studio"); } if (params.personId) { values.push("Person"); } if (params.parentId) { values.push("Folder"); } return values.join("-"); }; return ItemsView; });
1
15,180
Are we sure this should be translated by the web client? It was unclear in chat exactly what's getting translated here.
jellyfin-jellyfin-web
js
@@ -253,7 +253,7 @@ func (c *collection) actionToWrites(a *driver.Action) ([]*pb.Write, string, erro docName = driver.UniqueString() newName = docName } - w, err = c.putWrite(a.Doc, docName, &pb.Precondition{ConditionType: &pb.Precondition_Exists{false}}) + w, err = c.putWrite(a.Doc, docName, &pb.Precondition{ConditionType: &pb.Precondition_Exists{Exists: false}}) case driver.Replace: // If the given document has a revision, use it as the precondition (it implies existence).
1
// Copyright 2019 The Go Cloud Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package firedocstore provides an implementation of the docstore API for Google // Cloud Firestore. package firedocstore // import "gocloud.dev/internal/docstore/firedocstore" import ( "bytes" "context" "fmt" "io" "reflect" "regexp" "strings" vkit "cloud.google.com/go/firestore/apiv1" "gocloud.dev/internal/docstore" "gocloud.dev/internal/docstore/driver" "gocloud.dev/internal/gcerr" pb "google.golang.org/genproto/googleapis/firestore/v1" "google.golang.org/grpc/metadata" ) type collection struct { client *vkit.Client dbPath string // e.g. "projects/P/databases/(default)" collPath string // e.g. "projects/P/databases/(default)/documents/MyCollection" nameField string } // OpenCollection creates a *docstore.Collection representing a Firestore collection. // // collPath is the path to the collection, starting from a root collection. It may // refer to a top-level collection, like "States", or it may be a path to a nested // collection, like "States/Wisconsin/Cities". // // firedocstore requires that a single string field, nameField, be designated the // primary key. Its values must be unique over all documents in the collection, and // the primary key must be provided to retrieve a document. func OpenCollection(client *vkit.Client, projectID, collPath, nameField string) *docstore.Collection { return docstore.NewCollection(newCollection(client, projectID, collPath, nameField)) } func newCollection(client *vkit.Client, projectID, collPath, nameField string) *collection { dbPath := fmt.Sprintf("projects/%s/databases/(default)", projectID) return &collection{ client: client, dbPath: dbPath, collPath: fmt.Sprintf("%s/documents/%s", dbPath, collPath), nameField: nameField, } } // RunActions implements driver.RunActions. func (c *collection) RunActions(ctx context.Context, actions []*driver.Action) (int, error) { groups := groupActions(actions) nRun := 0 // number of actions successfully run var n int var err error for _, g := range groups { if g[0].Kind == driver.Get { n, err = c.runGets(ctx, g) nRun += n } else { err = c.runWrites(ctx, g) // Writes happen atomically: all or none. if err != nil { nRun += len(g) } } if err != nil { return nRun, err } } return nRun, nil } // Break the actions into subsequences, each of which can be done with a single RPC. // - Consecutive writes are grouped together. // - Consecutive gets with the same field paths are grouped together. func groupActions(actions []*driver.Action) [][]*driver.Action { // TODO(jba): Currently we don't have any transforms, but when we do, apply the // constraint that at most one transform per document is allowed in a given request // (see write.proto). 
var ( groups [][]*driver.Action // the actions, grouped; the return value cur []*driver.Action // the group currently being constructed ) collect := func() { // called when the current group is known to be finished if len(cur) > 0 { groups = append(groups, cur) cur = nil } } for _, a := range actions { if len(cur) > 0 { // If this action isn't a Get and the current group consists of Gets, finish the current group. if a.Kind != driver.Get && cur[0].Kind == driver.Get { collect() } // If this action is a Get and either (1) the current group consists of writes, or (2) the current group // of gets has a different list of field paths to retrieve, then finish the current group. // (The BatchGetDocuments RPC we use for Gets supports only a single set of field paths.) if a.Kind == driver.Get && (cur[0].Kind != driver.Get || !fpsEqual(cur[0].FieldPaths, a.FieldPaths)) { collect() } } cur = append(cur, a) } collect() return groups } // Run a sequence of Get actions by calling the BatchGetDocuments RPC. func (c *collection) runGets(ctx context.Context, gets []*driver.Action) (int, error) { req, err := c.newGetRequest(gets) if err != nil { return 0, err } streamClient, err := c.client.BatchGetDocuments(withResourceHeader(ctx, req.Database), req) if err != nil { return 0, err } // BatchGetDocuments is a streaming RPC. // Read the stream and organize by path, since results may arrive out of order. resps := map[string]*pb.BatchGetDocumentsResponse{} for { resp, err := streamClient.Recv() if err == io.EOF { break } if err != nil { return 0, err } switch r := resp.Result.(type) { case *pb.BatchGetDocumentsResponse_Found: resps[r.Found.Name] = resp case *pb.BatchGetDocumentsResponse_Missing: resps[r.Missing] = nil default: return 0, gcerr.Newf(gcerr.Internal, nil, "unknown BatchGetDocumentsResponse result type") } } // Now process the result for each input document. for i, path := range req.Documents { resp, ok := resps[path] if !ok { return i, gcerr.Newf(gcerr.Internal, nil, "no BatchGetDocumentsResponse for %q", path) } if resp == nil { return i, gcerr.Newf(gcerr.NotFound, nil, "document at path %q is missing", path) } pdoc := resp.Result.(*pb.BatchGetDocumentsResponse_Found).Found // TODO(jba): support field paths in decoding. if err := decodeDoc(pdoc, gets[i].Doc /*, gets[i].FieldPaths */); err != nil { return i, err } // Set the revision field in the document, if it exists, to the update time. // TODO(jba): uncomment this line when we implement revision fields. //_ = gets[i].Doc.SetField(docstore.RevisionField, pdoc.UpdateTime) } return len(gets), nil } func (c *collection) newGetRequest(gets []*driver.Action) (*pb.BatchGetDocumentsRequest, error) { req := &pb.BatchGetDocumentsRequest{Database: c.dbPath} for _, a := range gets { docName, err := c.docName(a.Doc) if err != nil { return nil, err } req.Documents = append(req.Documents, c.collPath+"/"+docName) } // groupActions has already made sure that all the actions have the same field paths, // so just use the first one. var fps []string // field paths that will go in the mask for _, fp := range gets[0].FieldPaths { fps = append(fps, toServiceFieldPath(fp)) } if fps != nil { req.Mask = &pb.DocumentMask{FieldPaths: fps} } return req, nil } // runWrites executes all the actions in a single RPC. The actions are done atomically, // so either they all succeed or they all fail. 
func (c *collection) runWrites(ctx context.Context, actions []*driver.Action) error { // Convert each action to one or more writes, collecting names for newly created // documents along the way. var pws []*pb.Write newNames := make([]string, len(actions)) // from Creates without a name for i, a := range actions { ws, nn, err := c.actionToWrites(a) if err != nil { return err } newNames[i] = nn pws = append(pws, ws...) } // Call the Commit RPC with the list of writes. wrs, err := c.commit(ctx, pws) if err != nil { return err } // Now that we've successfully done the action, set the names for newly created docs // that weren't given a name by the caller. for i, nn := range newNames { if nn != "" { _ = actions[i].Doc.SetField(c.nameField, nn) } } // Set the revision fields of all docs to the returned update times. // TODO(jba): uncomment when we support revisions. _ = wrs // for i, wr := range wrs { // // Ignore errors. It's fine if the doc doesn't have a revision field. // // (We also could get an error if that field is unsettable for some reason, but // // we just decide to ignore those as well.) // _ = actions[i].Doc.SetField(docstore.RevisionField, wr.UpdateTime) // } return nil } // Convert an action to one or more Firestore Write protos. func (c *collection) actionToWrites(a *driver.Action) ([]*pb.Write, string, error) { docName, err := c.docName(a.Doc) if err != nil { return nil, "", err } var ( w *pb.Write ws []*pb.Write newName string // for Create with no name ) switch a.Kind { case driver.Create: // Make a name for this document if it doesn't have one. if docName == "" { docName = driver.UniqueString() newName = docName } w, err = c.putWrite(a.Doc, docName, &pb.Precondition{ConditionType: &pb.Precondition_Exists{false}}) case driver.Replace: // If the given document has a revision, use it as the precondition (it implies existence). pc, err := revisionPrecondition(a.Doc) if err != nil { return nil, "", err } // Otherwise, just require that the document exists. if pc == nil { pc = &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}} } w, err = c.putWrite(a.Doc, docName, pc) case driver.Put: pc, err := revisionPrecondition(a.Doc) if err != nil { return nil, "", err } w, err = c.putWrite(a.Doc, docName, pc) case driver.Update: ws, err = c.updateWrites(a.Doc, docName, a.Mods) case driver.Delete: w, err = c.deleteWrite(a.Doc, docName) default: err = gcerr.Newf(gcerr.Internal, nil, "bad action %+v", a) } if err != nil { return nil, "", err } if ws == nil { ws = []*pb.Write{w} } return ws, newName, nil } func (c *collection) putWrite(doc driver.Document, docName string, pc *pb.Precondition) (*pb.Write, error) { pdoc, err := encodeDoc(doc) if err != nil { return nil, err } pdoc.Name = c.collPath + "/" + docName return &pb.Write{ Operation: &pb.Write_Update{pdoc}, CurrentDocument: pc, }, nil } func (c *collection) deleteWrite(doc driver.Document, docName string) (*pb.Write, error) { pc, err := revisionPrecondition(doc) if err != nil { return nil, err } return &pb.Write{ Operation: &pb.Write_Delete{c.collPath + "/" + docName}, CurrentDocument: pc, }, nil } // updateWrites returns a slice of writes because we may need two: one for setting // and deleting values, the other for transforms. func (c *collection) updateWrites(doc driver.Document, docName string, mods []driver.Mod) ([]*pb.Write, error) { pc, err := revisionPrecondition(doc) if err != nil { return nil, err } // If there is no revision in the document, add a precondition that the document exists. 
if pc == nil { pc = &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}} } pdoc := &pb.Document{ Name: c.collPath + "/" + docName, Fields: map[string]*pb.Value{}, } // To update a document, we need to send: // - A document with all the fields we want to add or change. // - A mask with the field paths of all the fields we want to add, change or delete. var fps []string // field paths that will go in the mask for _, m := range mods { // The field path of every mod belongs in the mask. fps = append(fps, toServiceFieldPath(m.FieldPath)) // If m.Value is nil, we want to delete it. In that case, we put the field in // the mask but not in the doc. if m.Value != nil { pv, err := encodeValue(m.Value) if err != nil { return nil, err } if err := setAtFieldPath(pdoc.Fields, m.FieldPath, pv); err != nil { return nil, err } } } w := &pb.Write{ Operation: &pb.Write_Update{pdoc}, UpdateMask: &pb.DocumentMask{FieldPaths: fps}, CurrentDocument: pc, } // For now, we don't have any transforms. return []*pb.Write{w}, nil } //////////////// // From memdocstore/mem.go. // setAtFieldPath sets m's value at fp to val. It creates intermediate maps as // needed. It returns an error if a non-final component of fp does not denote a map. func setAtFieldPath(m map[string]*pb.Value, fp []string, val *pb.Value) error { m2, err := getParentMap(m, fp, true) if err != nil { return err } m2[fp[len(fp)-1]] = val return nil } // getParentMap returns the map that directly contains the given field path; // that is, the value of m at the field path that excludes the last component // of fp. If a non-map is encountered along the way, an InvalidArgument error is // returned. If nil is encountered, nil is returned unless create is true, in // which case a map is added at that point. func getParentMap(m map[string]*pb.Value, fp []string, create bool) (map[string]*pb.Value, error) { for _, k := range fp[:len(fp)-1] { if m[k] == nil { if !create { return nil, nil } m[k] = &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: map[string]*pb.Value{}}}} } mv := m[k].GetMapValue() if mv == nil { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "invalid field path %q at %q", strings.Join(fp, "."), k) } m = mv.Fields } return m, nil } //////////////// // From fieldpath.go in cloud.google.com/go/firestore. // Convert a docstore field path, which is a []string, into the kind of field path // that the Firestore service expects: a string of dot-separated components, some of // which may be quoted. func toServiceFieldPath(fp []string) string { cs := make([]string, len(fp)) for i, c := range fp { cs[i] = toServiceFieldPathComponent(c) } return strings.Join(cs, ".") } // Google SQL syntax for an unquoted field. var unquotedFieldRegexp = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$") // toServiceFieldPathComponent returns a string that represents key and is a valid // Firestore field path component. Components must be quoted with backticks if // they don't match the above regexp. func toServiceFieldPathComponent(key string) string { if unquotedFieldRegexp.MatchString(key) { return key } var buf bytes.Buffer buf.WriteRune('`') for _, r := range key { if r == '`' || r == '\\' { buf.WriteRune('\\') } buf.WriteRune(r) } buf.WriteRune('`') return buf.String() } // revisionPrecondition returns a Firestore precondition that asserts that the stored document's // revision matches the revision of doc. 
func revisionPrecondition(doc driver.Document) (*pb.Precondition, error) { // TODO(jba): implement when adding support for revisions return nil, nil } // TODO(jba): make sure we enforce these Firestore commit constraints: // - At most one `transform` per document is allowed in a given request. // - An `update` cannot follow a `transform` on the same document in a given request. // These should actually happen in groupActions. func (c *collection) commit(ctx context.Context, ws []*pb.Write) ([]*pb.WriteResult, error) { req := &pb.CommitRequest{ Database: c.dbPath, Writes: ws, } res, err := c.client.Commit(withResourceHeader(ctx, req.Database), req) if err != nil { return nil, err } if len(res.WriteResults) != len(ws) { return nil, gcerr.Newf(gcerr.Internal, nil, "wrong number of WriteResults from firestore commit") } return res.WriteResults, nil } // docName returns the name of the document. This is the value of // the field called c.nameField, and it must be a string. // If the field doesn't exist, docName returns the empty string, rather // than an error. This is to support the Create action, which can // create new document names. func (c *collection) docName(doc driver.Document) (string, error) { n, err := doc.GetField(c.nameField) if err != nil { // Return missing field as empty string. return "", nil } // Check that the reflect kind is String so we can support any type whose underlying type // is string. E.g. "type DocName string". vn := reflect.ValueOf(n) if vn.Kind() != reflect.String { return "", fmt.Errorf("key field %q with value %v is not a string", c.nameField, n) } return vn.String(), nil } // Report whether two lists of field paths are equal. func fpsEqual(fps1, fps2 [][]string) bool { // TODO?: We really care about sets of field paths, but that's too tedious to determine. if len(fps1) != len(fps2) { return false } for i, fp1 := range fps1 { if !fpEqual(fp1, fps2[i]) { return false } } return true } // Report whether two field paths are equal. func fpEqual(fp1, fp2 []string) bool { if len(fp1) != len(fp2) { return false } for i, s1 := range fp1 { if s1 != fp2[i] { return false } } return true } func (c *collection) ErrorCode(err error) gcerr.ErrorCode { return gcerr.GRPCCode(err) } // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. const resourcePrefixHeader = "google-cloud-resource-prefix" // withResourceHeader returns a new context that includes resource in a special header. // Firestore uses the resource header for routing. func withResourceHeader(ctx context.Context, resource string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md[resourcePrefixHeader] = []string{resource} return metadata.NewOutgoingContext(ctx, md) }
1
15,801
Same `oneof` issue: initialize the generated wrapper with a keyed field, `&pb.Precondition_Exists{Exists: false}`, rather than an unkeyed composite literal.
google-go-cloud
go
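A minimal, runnable sketch of the `oneof` initialization issue this record's patch fixes. The types below are toy stand-ins for the protobuf-generated Firestore bindings, not the real `pb` package; only the shape and the `Exists` field mirror the patch.

```go
package main

import "fmt"

// Toy stand-ins for the generated oneof wrapper types; illustration only,
// not the real google.golang.org/genproto Firestore bindings.
type Precondition struct {
	ConditionType isPrecondition_ConditionType
}

type isPrecondition_ConditionType interface {
	isPrecondition_ConditionType()
}

type Precondition_Exists struct {
	Exists bool
}

func (*Precondition_Exists) isPrecondition_ConditionType() {}

func main() {
	// Unkeyed (&Precondition_Exists{false}) compiles, but breaks if the
	// generated struct gains or reorders fields, and `go vet` flags unkeyed
	// composite literals of imported types. Keyed is the patched form:
	pc := &Precondition{ConditionType: &Precondition_Exists{Exists: false}}
	fmt.Printf("%+v\n", pc.ConditionType) // &{Exists:false}
}
```

The same keyed form would apply to the `&pb.Precondition_Exists{true}` literals that remain in the Replace and update paths of the file above.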
@@ -258,7 +258,7 @@ module Bolt config.project.puppetfile, config.project.managed_moduledir, config.project.project_file, - config.module_install) + @plugins.resolve_references(config.module_install)) end # Generate Puppet data types from project modules.
1
# frozen_string_literal: true require 'benchmark' require_relative '../bolt/plan_creator' require_relative '../bolt/util' module Bolt class Application attr_reader :analytics, :config, :executor, :inventory, :logger, :pal, :plugins private :analytics, :config, :executor, :inventory, :logger, :pal, :plugins def initialize( analytics:, config:, executor:, inventory:, pal:, plugins: ) @analytics = analytics @config = config @executor = executor @inventory = inventory @logger = Bolt::Logger.logger(self) @pal = pal @plugins = plugins end # Shuts down the application. # def shutdown executor.shutdown end # Apply Puppet manifest code to a list of targets. # # @param manifest [String, NilClass] The path to a Puppet manifest file. # @param targets [Array[String]] The targets to run on. # @param code [String] Puppet manifest code to apply. # @param noop [Boolean] Whether to apply in no-operation mode. # @return [Bolt::ResultSet] # def apply(manifest, targets, code: '', noop: false) manifest_code = if manifest Bolt::Util.validate_file('manifest', manifest) File.read(File.expand_path(manifest)) else code end targets = inventory.get_targets(targets) Puppet[:tasks] = false ast = pal.parse_manifest(manifest_code, manifest) if defined?(ast.body) && (ast.body.is_a?(Puppet::Pops::Model::HostClassDefinition) || ast.body.is_a?(Puppet::Pops::Model::ResourceTypeDefinition)) message = "Manifest only contains definitions and will result in no changes on the targets. "\ "Definitions must be declared for their resources to be applied. You can read more "\ "about defining and declaring classes and types in the Puppet documentation at "\ "https://puppet.com/docs/puppet/latest/lang_classes.html and "\ "https://puppet.com/docs/puppet/latest/lang_defined_types.html" Bolt::Logger.warn("empty_manifest", message) end # Apply logging looks like plan logging executor.publish_event(type: :plan_start, plan: nil) with_benchmark do apply_prep_results = pal.in_plan_compiler(executor, inventory, plugins.puppetdb_client) do |compiler| compiler.call_function('apply_prep', targets, '_catch_errors' => true) end apply_results = pal.with_bolt_executor(executor, inventory, plugins.puppetdb_client) do Puppet.lookup(:apply_executor) .apply_ast(ast, apply_prep_results.ok_set.targets, catch_errors: true, noop: noop) end Bolt::ResultSet.new(apply_prep_results.error_set.results + apply_results.results) end end # Run a command on a list of targets. # # @param command [String] The command. # @param targets [Array[String]] The targets to run on. # @param env_vars [Hash] Environment variables to set on the target. # @return [Bolt::ResultSet] # def run_command(command, targets, env_vars: nil) targets = inventory.get_targets(targets) with_benchmark do executor.run_command(targets, command, env_vars: env_vars) end end # Download a file from a list of targets to a directory on the controller. # # @param source [String] The path to the file on the targets. # @param destination [String] The path to the directory on the controller. # @param targets [Array[String]] The targets to run on. # @return [Bolt::ResultSet] # def download_file(source, destination, targets) destination = File.expand_path(destination, Dir.pwd) targets = inventory.get_targets(targets) with_benchmark do executor.download_file(targets, source, destination) end end # Upload a file from the controller to a list of targets. # # @param source [String] The path to the file on the controller. # @param destination [String] The destination path on the targets. 
# @param targets [Array[String]] The targets to run on. # @return [Bolt::ResultSet] # def upload_file(source, destination, targets) source = find_file(source) targets = inventory.get_targets(targets) Bolt::Util.validate_file('source file', source, true) with_benchmark do executor.upload_file(targets, source, destination) end end # Show groups in the inventory. # # @return [Hash] # def list_groups { count: inventory.group_names.count, groups: inventory.group_names.sort, inventory: { default: config.default_inventoryfile.to_s, source: inventory.source } } end # Show available guides. # # @param guides [Hash] A map of topics to paths to guides. # @param outputter [Bolt::Outputter] An outputter instance. # @return [Boolean] # def list_guides { topics: load_guides.keys } end # Show a guide. # # @param topic [String] The topic to show. # @param guides [Hash] A map of topics to paths to guides. # @param outputter [Bolt::Outputter] An outputter instance. # @return [Boolean] # def show_guide(topic) if (path = load_guides[topic]) analytics.event('Guide', 'known_topic', label: topic) begin guide = Bolt::Util.read_yaml_hash(path, 'guide') rescue SystemCallError => e raise Bolt::FileError("#{e.message}: unable to load guide page", filepath) end # Make sure both topic and guide keys are defined unless (%w[topic guide] - guide.keys).empty? msg = "Guide file #{path} must have a 'topic' key and 'guide' key, but has #{guide.keys} keys." raise Bolt::Error.new(msg, 'bolt/invalid-guide') end Bolt::Util.symbolize_top_level_keys(guide) else analytics.event('Guide', 'unknown_topic', label: topic) raise Bolt::Error.new( "Unknown topic '#{topic}'. For a list of available topics, run 'bolt guide'.", 'bolt/unknown-topic' ) end end # Show inventory information. # # @param targets [Array[String]] The targets to show. # @return [Hash] # def show_inventory(targets = nil) targets = group_targets_by_source(targets || ['all']) { adhoc: { count: targets[:adhoc].count, targets: targets[:adhoc].map(&:detail) }, inventory: { count: targets[:inventory].count, targets: targets[:inventory].map(&:detail), file: (inventory.source || config.default_inventoryfile).to_s, default: config.default_inventoryfile.to_s }, targets: targets.values.flatten.map(&:detail), count: targets.values.flatten.count } end # Lookup a value with Hiera. # # @param key [String] The key to look up in the hierarchy. # @param targets [Array[String]] The targets to use as context. # @param vars [Hash] Variables to set in the scope. # @return [Bolt::ResultSet, String] The result of the lookup. # def lookup(key, targets, vars: {}) executor.publish_event(type: :plan_start, plan: nil) with_benchmark do pal.lookup(key, inventory.get_targets(targets), inventory, executor, plan_vars: vars) end end # Lookup a value with Hiera using plan_hierarchy. # # @param key [String] The key to lookup up in the plan_hierarchy. # @param vars [Hash] Variables to set in the scope. # @return [String] The result of the lookup. # def plan_lookup(key, vars: {}) pal.plan_hierarchy_lookup(key, plan_vars: vars) end # Add a new module to the project. # # @param name [String] The name of the module to add. # @param outputter [Bolt::Outputter] An outputter instance. 
# @return [Boolean] # def add_module(name, outputter) assert_project_file(config.project) installer = Bolt::ModuleInstaller.new(outputter, pal) installer.add(name, config.project.modules, config.project.puppetfile, config.project.managed_moduledir, config.project.project_file, config.module_install) end # Generate Puppet data types from project modules. # # @return [Boolean] # def generate_types pal.generate_types(cache: true) end # Install the project's modules. # # @param outputter [Bolt::Outputter] An outputter instance. # @param force [Boolean] Forcibly install modules. # @param resolve [Boolean] Resolve module dependencies. # @return [Boolean] # def install_modules(outputter, force: false, resolve: true) assert_project_file(config.project) if config.project.modules.empty? && resolve outputter.print_message( "Project configuration file #{config.project.project_file} does not "\ "specify any module dependencies. Nothing to do." ) return true end installer = Bolt::ModuleInstaller.new(outputter, pal) installer.install(config.project.modules, config.project.puppetfile, config.project.managed_moduledir, config.module_install, force: force, resolve: resolve) end # Show modules available to the project. # # @return [Hash] A map of module directories to module definitions. # def list_modules pal.list_modules end # Show module information. # # @param name [String] The name of the module. # @return [Hash] The module information. # def show_module(name) pal.show_module(name) end # Convert a YAML plan to a Puppet language plan. # # @param plan [String] The plan to convert. Can be a plan name or a path. # @return [String] The converted plan. # def convert_plan(plan) pal.convert_plan(plan) end # Create a new project-level plan. # # @param name [String] The name of the new plan. # @param puppet [Boolean] Create a Puppet language plan. # @param plan_script [String] Reference to the script to run in the new plan. # @return [Boolean] # def new_plan(name, puppet: false, plan_script: nil) Bolt::PlanCreator.validate_plan_name(config.project, name) if plan_script && !config.future&.fetch('file_paths', false) raise Bolt::CLIError, "The --script flag can only be used if future.file_paths is " \ "configured in bolt-project.yaml." end if plan_script Bolt::Util.validate_file('script', find_file(plan_script)) end Bolt::PlanCreator.create_plan(config.project.plans_path, name, is_puppet: puppet, script: plan_script) end # Run a plan. # # @param plan [String] The plan to run. # @param targets [Array[String], NilClass] The targets to pass to the plan. # @param params [Hash] Parameters to pass to the plan. # @return [Bolt::PlanResult] # def run_plan(plan, targets, params: {}) if targets && targets.any? if params['nodes'] || params['targets'] key = params.include?('nodes') ? 'nodes' : 'targets' raise Bolt::CLIError, "A plan's '#{key}' parameter can be specified using the --#{key} option, but in that " \ "case it must not be specified as a separate #{key}=<value> parameter nor included " \ "in the JSON data passed in the --params option" end plan_params = pal.get_plan_info(plan)['parameters'] target_param = plan_params.dig('targets', 'type') =~ /TargetSpec/ node_param = plan_params.include?('nodes') if node_param && target_param msg = "Plan parameters include both 'nodes' and 'targets' with type 'TargetSpec', " \ "neither will populated with the value for --nodes or --targets." 
Bolt::Logger.warn("nodes_targets_parameters", msg) elsif node_param params['nodes'] = targets.join(',') elsif target_param params['targets'] = targets.join(',') end end plan_context = { plan_name: plan, params: params } executor.start_plan(plan_context) result = pal.run_plan(plan, params, executor, inventory, plugins.puppetdb_client) executor.finish_plan(result) result end # Show plan information. # # @param plan [String] The name of the plan to show. # @return [Hash] # def show_plan(plan) pal.get_plan_info(plan) end # List plans available to the project. # # @param filter [String] A substring to filter plans by. # @return [Hash] # def list_plans(filter: nil) { plans: filter_content(pal.list_plans_with_cache(filter_content: true), filter), modulepath: pal.user_modulepath } end # Show available plugins. # # @return [Hash] # def list_plugins { plugins: plugins.list_plugins, modulepath: pal.user_modulepath } end # Initialize the current directory as a Bolt project. # # @param name [String] The name of the project. # @param [Bolt::Outputter] An outputter instance. # @param modules [Array[String], NilClass] Modules to install. # @return [Boolean] # def create_project(name, outputter, modules: nil) Bolt::ProjectManager.new(config, outputter, pal) .create(Dir.pwd, name, modules) end # Migrate a project to current best practices. # # @param [Bolt::Outputter] An outputter instance. # @return [Boolean] # def migrate_project(outputter) Bolt::ProjectManager.new(config, outputter, pal).migrate end # Run a script on a list of targets. # # @param script [String] The path to the script to run. # @param targets [Array[String]] The targets to run on. # @param arguments [Array[String], NilClass] Arguments to pass to the script. # @param env_vars [Hash] Environment variables to set on the target. # @return [Bolt::ResultSet] # def run_script(script, targets, arguments: [], env_vars: nil) script = find_file(script) Bolt::Util.validate_file('script', script) with_benchmark do executor.run_script(inventory.get_targets(targets), script, arguments, env_vars: env_vars) end end # Generate a keypair using the configured secret plugin. # # @param force [Boolean] Forcibly create a keypair. # @param plugin [String] The secret plugin to use. # @return [Boolean] # def create_secret_keys(force: false, plugin: 'pkcs7') unless plugins.by_name(plugin) raise Bolt::Plugin::PluginError::Unknown, plugin end plugins.get_hook(plugin, :secret_createkeys) .call('force' => force) end # Decrypt ciphertext using the configured secret plugin. # # @param ciphertext [String] The ciphertext to decrypt. # @param plugin [String] The secret plugin to use. # @return [Boolean] # def decrypt_secret(ciphertext, plugin: 'pkcs7') unless plugins.by_name(plugin) raise Bolt::Plugin::PluginError::Unknown, plugin end plugins.get_hook(plugin, :secret_decrypt) .call('encrypted_value' => ciphertext) end # Encrypt plaintext using the configured secret plugin. # # @param plaintext [String] The plaintext to encrypt. # @param plugin [String] The secret plugin to use. # @return [Boolean] # def encrypt_secret(plaintext, plugin: 'pkcs7') unless plugins.by_name(plugin) raise Bolt::Plugin::PluginError::Unknown, plugin end plugins.get_hook(plugin, :secret_encrypt) .call('plaintext_value' => plaintext) end # Run a task on a list of targets. # # @param task [String] The name of the task. # @param options [Hash] Additional options. 
# @return [Bolt::ResultSet] # def run_task(task, targets, params: {}) targets = inventory.get_targets(targets) with_benchmark do pal.run_task(task, targets, params, executor, inventory) end end # Show task information. # # @param task [String] The name of the task to show. # @return [Hash] # def show_task(task) { task: pal.get_task(task) } end # List available tasks. # # @param filter [String] A substring to filter tasks by. # @return [Hash] # def list_tasks(filter: nil) { tasks: filter_content(pal.list_tasks_with_cache(filter_content: true), filter), modulepath: pal.user_modulepath } end # Assert that there is a project configuration file. # # @param project [Bolt::Project] The Bolt project. # private def assert_project_file(project) unless project.project_file? command = Bolt::Util.powershell? ? 'New-BoltProject' : 'bolt project init' msg = "Could not find project configuration file #{project.project_file}, unable "\ "to install modules. To create a Bolt project, run '#{command}'." raise Bolt::Error.new(msg, 'bolt/missing-project-config-error') end end # Filter a list of content by matching substring. # # @param content [Hash] The content to filter. # @param filter [String] The substring to filter content by. # private def filter_content(content, filter) return content unless content && filter content.select { |name,| name.include?(filter) } end # Return the path to a file. If the path is an absolute or relative path to # a file, and the file exists, return the path as-is. Otherwise, check if # the path is a Puppet file path and look for the file in a module's files # directory. # # @param path [String] The path to the file. # private def find_file(path) return path if File.exist?(path) || Pathname.new(path).absolute? modulepath = Bolt::Config::Modulepath.new(config.modulepath) modules = Bolt::Module.discover(modulepath.full_modulepath, config.project) mod, file = path.split(File::SEPARATOR, 2) future = executor.future&.fetch('file_paths', false) if modules[mod] logger.debug("Did not find file at #{File.expand_path(path)}, checking in module '#{mod}'") found = Bolt::Util.find_file_in_module(modules[mod].path, file || "", future) path = found.nil? ? File.join(modules[mod].path, 'files', file) : found end path end # Get a list of Bolt guides. # private def load_guides root_path = File.expand_path(File.join(__dir__, '..', '..', 'guides')) files = Dir.children(root_path).sort files.each_with_object({}) do |file, guides| next if file !~ /\.(yaml|yml)\z/ topic = File.basename(file, ".*") guides[topic] = File.join(root_path, file) end rescue SystemCallError => e raise Bolt::FileError.new("#{e.message}: unable to load guides directory", root_path) end # Return a hash of targets sorted by those that are found in the inventory # and those that are provided on the command line. # # @param targets [Array[String]] The targets to group. # private def group_targets_by_source(targets) # Retrieve the known group and target names. This needs to be done before # updating targets, as that will add adhoc targets to the inventory. known_names = inventory.target_names targets = inventory.get_targets(targets) inventory_targets, adhoc_targets = targets.partition do |target| known_names.include?(target.name) end { inventory: inventory_targets, adhoc: adhoc_targets } end # Benchmark the action and set the elapsed time on the result. 
# private def with_benchmark result = nil elapsed_time = Benchmark.realtime do result = yield end result.tap { |r| r.elapsed_time = elapsed_time if r.is_a?(Bolt::ResultSet) } end end end
1
19,223
If we resolve here, isn't the whole `module_install` config setting, and any of its subkeys, also pluggable? I think that's totally fine, I just want to make sure that's known. We should also update the data in `options.rb` for those options.
puppetlabs-bolt
rb
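A toy sketch, not Bolt's implementation, of what it means for `module_install` to pass through `resolve_references`: any subkey may be a `_plugin` reference hash that is swapped for the plugin's resolved value before the installer sees it. The resolver below handles only an `env_var`-style shape, and every name in it is illustrative.

```ruby
# Toy resolver, NOT Bolt's implementation: recursively replaces any
# { '_plugin' => ... } hash with a resolved value. A real Bolt::Plugins
# object dispatches to the named plugin; this handles only an env_var shape.
def resolve_references(value)
  case value
  when Hash
    if value.key?('_plugin')
      ENV.fetch(value['var'], nil)
    else
      value.transform_values { |v| resolve_references(v) }
    end
  when Array
    value.map { |v| resolve_references(v) }
  else
    value
  end
end

module_install = {
  'proxy' => { '_plugin' => 'env_var', 'var' => 'HTTPS_PROXY' }
}
p resolve_references(module_install)
# => {"proxy"=>"http://proxy.example.com:3128"} when HTTPS_PROXY is set
```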
@@ -376,6 +376,7 @@ hipError_t ihipExtLaunchMultiKernelMultiDevice(hipLaunchParams* launchParamsList if(globalWorkSizeX > UINT32_MAX || globalWorkSizeY > UINT32_MAX || globalWorkSizeZ > UINT32_MAX) { + free(kds); return hipErrorInvalidConfiguration; }
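A compact sketch of the leak this patch closes, with hypothetical names throughout: once `kds` comes from a raw `malloc`, every early `return` needs a matching `free`, which is what the added line supplies on the oversized-grid path. An RAII holder makes the same guarantee automatic.

```cpp
#include <cstdlib>
#include <memory>

// Raw-pointer shape, mirroring the patched function: each early return
// must free(kds) explicitly or the allocation leaks.
int launch_raw(int numDevices, bool oversizedGrid) {
    void* kds = std::malloc(sizeof(void*) * numDevices);
    if (kds == nullptr) return -1;
    if (oversizedGrid) {
        std::free(kds);  // the fix: release before the early return
        return -2;
    }
    std::free(kds);      // normal-path release, as at the end of the original
    return 0;
}

// RAII alternative: a unique_ptr with a free() deleter releases on every path.
int launch_raii(int numDevices, bool oversizedGrid) {
    std::unique_ptr<void, decltype(&std::free)> kds(
        std::malloc(sizeof(void*) * numDevices), &std::free);
    if (!kds) return -1;
    if (oversizedGrid) return -2;  // no manual cleanup needed
    return 0;
}

int main() {
    launch_raw(4, true);
    launch_raii(4, true);
    return 0;
}
```

In the function below, the other error paths and the normal exit already call `free(kds)`; only this early return had been missed.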
1
/* Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "hip/hip_runtime.h" #include "hip/hcc_detail/elfio/elfio.hpp" #include "hip/hcc_detail/hsa_helpers.hpp" #include "hip/hcc_detail/program_state.hpp" #include "hip_hcc_internal.h" #include "hip/hip_ext.h" #include "program_state.inl" #include "trace_helper.h" #include "hc_am.hpp" #include <hsa/amd_hsa_kernel_code.h> #include <hsa/hsa.h> #include <hsa/hsa_ext_amd.h> #include <algorithm> #include <cassert> #include <cstdint> #include <cstdio> #include <cstdlib> #include <fstream> #include <map> #include <memory> #include <mutex> #include <sstream> #include <stdexcept> #include <string> #include <tuple> #include <unordered_map> #include <utility> #include <vector> #include "../include/hip/hcc_detail/code_object_bundle.hpp" #include "hip_fatbin.h" // TODO Use Pool APIs from HCC to get memory regions. using namespace ELFIO; using namespace std; // For HIP implicit kernargs. static const size_t HIP_IMPLICIT_KERNARG_SIZE = 56; static const size_t HIP_IMPLICIT_KERNARG_ALIGNMENT = 8; struct amd_kernel_code_v3_t { uint32_t group_segment_fixed_size; uint32_t private_segment_fixed_size; uint8_t reserved0[8]; int64_t kernel_code_entry_byte_offset; uint8_t reserved1[24]; uint32_t compute_pgm_rsrc1; uint32_t compute_pgm_rsrc2; uint16_t kernel_code_properties; uint8_t reserved2[6]; }; // calculate MD5 checksum inline std::string checksum(size_t size, const char *source) { // FNV-1a hashing, 64-bit version const uint64_t FNV_prime = 0x100000001b3; const uint64_t FNV_basis = 0xcbf29ce484222325; uint64_t hash = FNV_basis; const char *str = static_cast<const char *>(source); for (auto i = 0; i < size; ++i) { hash ^= *str++; hash *= FNV_prime; } return std::to_string(hash); } inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { assert(Align != 0u && "Align can't be 0."); Skew %= Align; return (Value + Align - 1 - Skew) / Align * Align + Skew; } struct ihipKernArgInfo { vector<uint32_t> Size; vector<uint32_t> Align; vector<string> ArgType; vector<string> ArgName; uint32_t totalSize; }; map<string, ihipKernArgInfo> kernelArguments; struct ihipModuleSymbol_t { uint64_t _object{}; // The kernel object. amd_kernel_code_t const* _header{}; string _name; // TODO - review for performance cost. Name is just used for debug. 
vector<pair<size_t, size_t>> _kernarg_layout{}; bool _is_code_object_v3{}; }; template <> string ToString(hipFunction_t v) { std::ostringstream ss; ss << "0x" << std::hex << v->_object; return ss.str(); }; const std::string& FunctionSymbol(const hipFunction_t f) { return f->_name; }; extern hipError_t ihipGetDeviceProperties(hipDeviceProp_t* props, int device); #define CHECK_HSA(hsaStatus, hipStatus) \ if (hsaStatus != HSA_STATUS_SUCCESS) { \ return hipStatus; \ } #define CHECKLOG_HSA(hsaStatus, hipStatus) \ if (hsaStatus != HSA_STATUS_SUCCESS) { \ return ihipLogStatus(hipStatus); \ } hipError_t ihipModuleLaunchKernel(TlsData *tls, hipFunction_t f, uint32_t globalWorkSizeX, uint32_t globalWorkSizeY, uint32_t globalWorkSizeZ, uint32_t localWorkSizeX, uint32_t localWorkSizeY, uint32_t localWorkSizeZ, size_t sharedMemBytes, hipStream_t hStream, void** kernelParams, void** extra, hipEvent_t startEvent, hipEvent_t stopEvent, uint32_t flags, bool isStreamLocked = 0, void** impCoopParams = 0, hc::accelerator_view* coopAV = 0) { using namespace hip_impl; auto ctx = ihipGetTlsDefaultCtx(); hipError_t ret = hipSuccess; if (ctx == nullptr) { ret = hipErrorInvalidDevice; } else { int deviceId = ctx->getDevice()->_deviceId; ihipDevice_t* currentDevice = ihipGetDevice(deviceId); hsa_agent_t gpuAgent = (hsa_agent_t)currentDevice->_hsaAgent; std::vector<char> kernargs{}; if (kernelParams) { if (extra) return hipErrorInvalidValue; for (auto&& x : f->_kernarg_layout) { const auto p{static_cast<const char*>(*kernelParams)}; kernargs.insert( kernargs.cend(), round_up_to_next_multiple_nonnegative( kernargs.size(), x.second) - kernargs.size(), '\0'); kernargs.insert(kernargs.cend(), p, p + x.first); ++kernelParams; } } else if (extra) { if (extra[0] == HIP_LAUNCH_PARAM_BUFFER_POINTER && extra[2] == HIP_LAUNCH_PARAM_BUFFER_SIZE && extra[4] == HIP_LAUNCH_PARAM_END) { auto args = (char*)extra[1]; size_t argSize = *(size_t*)(extra[3]); kernargs.insert(kernargs.end(), args, args+argSize); } else { return hipErrorNotInitialized; } } else if (f->_kernarg_layout.size() != 0) { return hipErrorInvalidValue; } // Insert 56-bytes at the end for implicit kernel arguments and fill with value zero. size_t padSize = (~kernargs.size() + 1) & (HIP_IMPLICIT_KERNARG_ALIGNMENT - 1); kernargs.insert(kernargs.end(), padSize + HIP_IMPLICIT_KERNARG_SIZE, 0); if (impCoopParams) { const auto p{static_cast<const char*>(*impCoopParams)}; // The sixth index is for multi-grid synchronization copy(p, p + HIP_IMPLICIT_KERNARG_ALIGNMENT, (kernargs.end() - HIP_IMPLICIT_KERNARG_SIZE) + 6 * HIP_IMPLICIT_KERNARG_ALIGNMENT); } /* Kernel argument preparation. */ grid_launch_parm lp; lp.dynamic_group_mem_bytes = sharedMemBytes; // TODO - this should be part of preLaunchKernel. 
hStream = ihipPreLaunchKernel( hStream, dim3(globalWorkSizeX/localWorkSizeX, globalWorkSizeY/localWorkSizeY, globalWorkSizeZ/localWorkSizeZ), dim3(localWorkSizeX, localWorkSizeY, localWorkSizeZ), &lp, f->_name.c_str(), isStreamLocked); hsa_kernel_dispatch_packet_t aql; memset(&aql, 0, sizeof(aql)); // aql.completion_signal._handle = 0; // aql.kernarg_address = 0; aql.workgroup_size_x = localWorkSizeX; aql.workgroup_size_y = localWorkSizeY; aql.workgroup_size_z = localWorkSizeZ; aql.grid_size_x = globalWorkSizeX; aql.grid_size_y = globalWorkSizeY; aql.grid_size_z = globalWorkSizeZ; if (f->_is_code_object_v3) { const auto* header = reinterpret_cast<const amd_kernel_code_v3_t*>(f->_header); aql.group_segment_size = header->group_segment_fixed_size + sharedMemBytes; aql.private_segment_size = header->private_segment_fixed_size; } else { aql.group_segment_size = f->_header->workgroup_group_segment_byte_size + sharedMemBytes; aql.private_segment_size = f->_header->workitem_private_segment_byte_size; } aql.kernel_object = f->_object; aql.setup = 3 << HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS; aql.header = (HSA_PACKET_TYPE_KERNEL_DISPATCH << HSA_PACKET_HEADER_TYPE); if((flags & 0x1)== 0 ) { //in_order aql.header |= (1 << HSA_PACKET_HEADER_BARRIER); } aql.header |= lp.launch_fence; hc::completion_future cf; if (coopAV) { lp.av = coopAV; } lp.av->dispatch_hsa_kernel(&aql, kernargs.data(), kernargs.size(), (startEvent || stopEvent) ? &cf : nullptr #if (__hcc_workweek__ > 17312) , f->_name.c_str() #endif ); if (startEvent) { startEvent->attachToCompletionFuture(&cf, hStream, hipEventTypeStartCommand); } if (stopEvent) { stopEvent->attachToCompletionFuture(&cf, hStream, hipEventTypeStopCommand); } ihipPostLaunchKernel(f->_name.c_str(), hStream, lp, isStreamLocked); } return ret; } hipError_t hipModuleLaunchKernel(hipFunction_t f, uint32_t gridDimX, uint32_t gridDimY, uint32_t gridDimZ, uint32_t blockDimX, uint32_t blockDimY, uint32_t blockDimZ, uint32_t sharedMemBytes, hipStream_t hStream, void** kernelParams, void** extra) { HIP_INIT_API(hipModuleLaunchKernel, f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams, extra); size_t globalWorkSizeX = (size_t)gridDimX * (size_t)blockDimX; size_t globalWorkSizeY = (size_t)gridDimY * (size_t)blockDimY; size_t globalWorkSizeZ = (size_t)gridDimZ * (size_t)blockDimZ; if(globalWorkSizeX > UINT32_MAX || globalWorkSizeY > UINT32_MAX || globalWorkSizeZ > UINT32_MAX) { return hipErrorInvalidConfiguration; } return ihipLogStatus(ihipModuleLaunchKernel(tls, f, globalWorkSizeX, globalWorkSizeY, globalWorkSizeZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams, extra, nullptr, nullptr, 0)); } hipError_t hipExtModuleLaunchKernel(hipFunction_t f, uint32_t globalWorkSizeX, uint32_t globalWorkSizeY, uint32_t globalWorkSizeZ, uint32_t localWorkSizeX, uint32_t localWorkSizeY, uint32_t localWorkSizeZ, size_t sharedMemBytes, hipStream_t hStream, void** kernelParams, void** extra, hipEvent_t startEvent, hipEvent_t stopEvent, uint32_t flags) { HIP_INIT_API(hipExtModuleLaunchKernel, f, globalWorkSizeX, globalWorkSizeY, globalWorkSizeZ, localWorkSizeX, localWorkSizeY, localWorkSizeZ, sharedMemBytes, hStream, kernelParams, extra); return ihipLogStatus(ihipModuleLaunchKernel(tls, f, globalWorkSizeX, globalWorkSizeY, globalWorkSizeZ, localWorkSizeX, localWorkSizeY, localWorkSizeZ, sharedMemBytes, hStream, kernelParams, extra, startEvent, stopEvent, flags)); } hipError_t hipHccModuleLaunchKernel(hipFunction_t f, 
uint32_t globalWorkSizeX, uint32_t globalWorkSizeY, uint32_t globalWorkSizeZ, uint32_t localWorkSizeX, uint32_t localWorkSizeY, uint32_t localWorkSizeZ, size_t sharedMemBytes, hipStream_t hStream, void** kernelParams, void** extra, hipEvent_t startEvent, hipEvent_t stopEvent) { HIP_INIT_API(hipHccModuleLaunchKernel, f, globalWorkSizeX, globalWorkSizeY, globalWorkSizeZ, localWorkSizeX, localWorkSizeY, localWorkSizeZ, sharedMemBytes, hStream, kernelParams, extra); return ihipLogStatus(ihipModuleLaunchKernel(tls, f, globalWorkSizeX, globalWorkSizeY, globalWorkSizeZ, localWorkSizeX, localWorkSizeY, localWorkSizeZ, sharedMemBytes, hStream, kernelParams, extra, startEvent, stopEvent, 0)); } __attribute__((visibility("default"))) hipError_t ihipExtLaunchMultiKernelMultiDevice(hipLaunchParams* launchParamsList, int numDevices, unsigned int flags, hip_impl::program_state& ps) { hipError_t result; if ((numDevices > g_deviceCnt) || (launchParamsList == nullptr)) { return hipErrorInvalidValue; } hipFunction_t* kds = reinterpret_cast<hipFunction_t*>(malloc(sizeof(hipFunction_t) * numDevices)); if (kds == nullptr) { return hipErrorNotInitialized; } // prepare all kernel descriptors for each device as all streams will be locked in the next loop for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; if (lp.stream == nullptr) { free(kds); return hipErrorNotInitialized; } kds[i] = ps.kernel_descriptor(reinterpret_cast<std::uintptr_t>(lp.func), hip_impl::target_agent(lp.stream)); if (kds[i] == nullptr) { free(kds); return hipErrorInvalidValue; } hip_impl::kernargs_size_align kargs = ps.get_kernargs_size_align( reinterpret_cast<std::uintptr_t>(lp.func)); kds[i]->_kernarg_layout = *reinterpret_cast<const std::vector<std::pair<std::size_t, std::size_t>>*>( kargs.getHandle()); } // lock all streams before launching kernels to each device for (int i = 0; i < numDevices; ++i) { LockedAccessor_StreamCrit_t streamCrit(launchParamsList[i].stream->criticalData(), false); #if (__hcc_workweek__ >= 19213) streamCrit->_av.acquire_locked_hsa_queue(); #endif } GET_TLS(); size_t globalWorkSizeX = 0, globalWorkSizeY = 0, globalWorkSizeZ = 0; // launch kernels for each device for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; globalWorkSizeX = (size_t)lp.gridDim.x * (size_t)lp.blockDim.x; globalWorkSizeY = (size_t)lp.gridDim.y * (size_t)lp.blockDim.y; globalWorkSizeZ = (size_t)lp.gridDim.z * (size_t)lp.blockDim.z; if(globalWorkSizeX > UINT32_MAX || globalWorkSizeY > UINT32_MAX || globalWorkSizeZ > UINT32_MAX) { return hipErrorInvalidConfiguration; } result = ihipModuleLaunchKernel(tls, kds[i], lp.gridDim.x * lp.blockDim.x, lp.gridDim.y * lp.blockDim.y, lp.gridDim.z * lp.blockDim.z, lp.blockDim.x, lp.blockDim.y, lp.blockDim.z, lp.sharedMem, lp.stream, lp.args, nullptr, nullptr, nullptr, 0, true /* stream is already locked above and will be unlocked in the below code after launching kernels on all devices*/); } // unlock all streams for (int i = 0; i < numDevices; ++i) { launchParamsList[i].stream->criticalData().unlock(); #if (__hcc_workweek__ >= 19213) launchParamsList[i].stream->criticalData()._av.release_locked_hsa_queue(); #endif } free(kds); return result; } void getGprsLdsUsage(hipFunction_t f, size_t* usedVGPRS, size_t* usedSGPRS, size_t* usedLDS) { if (f->_is_code_object_v3) { const auto header = reinterpret_cast<const amd_kernel_code_v3_t*>(f->_header); // GRANULATED_WAVEFRONT_VGPR_COUNT is specified in 0:5 bits of COMPUTE_PGM_RSRC1 // the 
granularity for gfx6-gfx9 is max(0, ceil(vgprs_used / 4) - 1) *usedVGPRS = ((header->compute_pgm_rsrc1 & 0x3F) + 1) << 2; // GRANULATED_WAVEFRONT_SGPR_COUNT is specified in 6:9 bits of COMPUTE_PGM_RSRC1 // the granularity for gfx9+ is 2 * max(0, ceil(sgprs_used / 16) - 1) *usedSGPRS = ((((header->compute_pgm_rsrc1 & 0x3C0) >> 6) >> 1) + 1) << 4; *usedLDS = header->group_segment_fixed_size; } else { const auto header = f->_header; // VGPRs granularity is 4 *usedVGPRS = ((header->workitem_vgpr_count + 3) >> 2) << 2; // adding 2 to take into account the 2 VCC registers & handle the granularity of 16 *usedSGPRS = header->wavefront_sgpr_count + 2; *usedSGPRS = ((*usedSGPRS + 15) >> 4) << 4; *usedLDS = header->workgroup_group_segment_byte_size; } } static hipError_t ihipOccupancyMaxActiveBlocksPerMultiprocessor( TlsData *tls, uint32_t* numBlocks, hipFunction_t f, uint32_t blockSize, size_t dynSharedMemPerBlk) { using namespace hip_impl; auto ctx = ihipGetTlsDefaultCtx(); if (ctx == nullptr) { return hipErrorInvalidDevice; } if (numBlocks == nullptr) { return hipErrorInvalidValue; } hipDeviceProp_t prop{}; ihipGetDeviceProperties(&prop, ihipGetTlsDefaultCtx()->getDevice()->_deviceId); if (blockSize > prop.maxThreadsPerBlock) { *numBlocks = 0; return hipSuccess; } prop.regsPerBlock = prop.regsPerBlock ? prop.regsPerBlock : 64 * 1024; size_t usedVGPRS = 0; size_t usedSGPRS = 0; size_t usedLDS = 0; getGprsLdsUsage(f, &usedVGPRS, &usedSGPRS, &usedLDS); // Due to SPI and private memory limitations, the max of wavefronts per CU is 32 size_t wavefrontSize = prop.warpSize; size_t maxWavefrontsPerCU = min(prop.maxThreadsPerMultiProcessor / wavefrontSize, 32); const size_t simdPerCU = 4; const size_t maxWavesPerSimd = maxWavefrontsPerCU / simdPerCU; size_t numWavefronts = (blockSize + wavefrontSize - 1) / wavefrontSize; size_t availableVGPRs = (prop.regsPerBlock / wavefrontSize / simdPerCU); size_t vgprs_alu_occupancy = simdPerCU * (usedVGPRS == 0 ? maxWavesPerSimd : std::min(maxWavesPerSimd, availableVGPRs / usedVGPRS)); // Calculate blocks occupancy per CU based on VGPR usage *numBlocks = vgprs_alu_occupancy / numWavefronts; const size_t availableSGPRs = (prop.gcnArch < 800) ? 512 : 800; size_t sgprs_alu_occupancy = simdPerCU * (usedSGPRS == 0 ? maxWavesPerSimd : std::min(maxWavesPerSimd, availableSGPRs / usedSGPRS)); // Calculate blocks occupancy per CU based on SGPR usage *numBlocks = std::min(*numBlocks, (uint32_t) (sgprs_alu_occupancy / numWavefronts)); size_t total_used_lds = usedLDS + dynSharedMemPerBlk; if (total_used_lds != 0) { // Calculate LDS occupancy per CU.
lds_per_cu / (static_lds + dynamic_lds) size_t lds_occupancy = prop.maxSharedMemoryPerMultiProcessor / total_used_lds; *numBlocks = std::min(*numBlocks, (uint32_t) lds_occupancy); } return hipSuccess; } namespace { // kernel for initializing GWS // nwm1 is the total number of work groups minus 1 __global__ void init_gws(uint nwm1) { __ockl_gws_init(nwm1, 0); } } __attribute__((visibility("default"))) hipError_t ihipLaunchCooperativeKernel(const void* f, dim3 gridDim, dim3 blockDimX, void** kernelParams, unsigned int sharedMemBytes, hipStream_t stream, hip_impl::program_state& ps) { #if (__hcc_workweek__ >= 20093) hipError_t result; if (f == nullptr || kernelParams == nullptr) { return hipErrorNotInitialized; } stream = ihipSyncAndResolveStream(stream); if (!stream->getDevice()->_props.cooperativeLaunch || blockDimX.x * blockDimX.y * blockDimX.z > stream->getDevice()->_props.maxThreadsPerBlock) { return hipErrorInvalidConfiguration; } size_t globalWorkSizeX = (size_t)gridDim.x * (size_t)blockDimX.x; size_t globalWorkSizeY = (size_t)gridDim.y * (size_t)blockDimX.y; size_t globalWorkSizeZ = (size_t)gridDim.z * (size_t)blockDimX.z; if(globalWorkSizeX > UINT32_MAX || globalWorkSizeY > UINT32_MAX || globalWorkSizeZ > UINT32_MAX) { return hipErrorInvalidConfiguration; } // Prepare the kernel descriptor for initializing the GWS hipFunction_t gwsKD = ps.kernel_descriptor( reinterpret_cast<std::uintptr_t>(&init_gws), hip_impl::target_agent(stream)); if (gwsKD == nullptr) { return hipErrorInvalidValue; } hip_impl::kernargs_size_align gwsKargs = ps.get_kernargs_size_align( reinterpret_cast<std::uintptr_t>(&init_gws)); gwsKD->_kernarg_layout = *reinterpret_cast<const std::vector< std::pair<std::size_t, std::size_t>>*>(gwsKargs.getHandle()); // Prepare the kernel descriptor for the main kernel hipFunction_t kd = ps.kernel_descriptor( reinterpret_cast<std::uintptr_t>(f), hip_impl::target_agent(stream)); if (kd == nullptr) { return hipErrorInvalidValue; } hip_impl::kernargs_size_align kargs = ps.get_kernargs_size_align( reinterpret_cast<std::uintptr_t>(f)); kd->_kernarg_layout = *reinterpret_cast<const std::vector< std::pair<std::size_t, std::size_t>>*>(kargs.getHandle()); GET_TLS(); uint32_t numBlocksPerSm = 0; result = ihipOccupancyMaxActiveBlocksPerMultiprocessor(tls, &numBlocksPerSm, kd, blockDimX.x * blockDimX.y * blockDimX.z, sharedMemBytes); if (result != hipSuccess) { return hipErrorLaunchFailure; } int maxActiveBlocks = numBlocksPerSm * stream->getDevice()->_props.multiProcessorCount; //check to see if the workload fits on the GPU if (gridDim.x * gridDim.y * gridDim.z > maxActiveBlocks) { return hipErrorCooperativeLaunchTooLarge; } void *gwsKernelParam[1]; // calculate total number of work groups minus 1 for the main kernel uint nwm1 = (gridDim.x * gridDim.y * gridDim.z) - 1; gwsKernelParam[0] = &nwm1; hc::accelerator acc = stream->getDevice()->_acc; // create a cooperative accelerated view for launching gws and main kernels hc::accelerator_view coopAV = acc.create_cooperative_view(); LockedAccessor_StreamCrit_t streamCrit(stream->criticalData(), false); // the cooperative queue will wait until this stream completes its operations hc::completion_future streamCF; if (!streamCrit->_av.get_is_empty()) { streamCF = streamCrit->_av.create_marker(hc::accelerator_scope); coopAV.create_blocking_marker(streamCF, hc::accelerator_scope); } streamCrit->_av.acquire_locked_hsa_queue(); coopAV.acquire_locked_hsa_queue(); // launch the init_gws kernel to initialize the GWS in the dedicated cooperative queue
result = ihipModuleLaunchKernel(tls, gwsKD, 1, 1, 1, 1, 1, 1, 0, stream, gwsKernelParam, nullptr, nullptr, nullptr, 0, true, nullptr , &coopAV); if (result != hipSuccess) { stream->criticalData().unlock(); stream->criticalData()._av.release_locked_hsa_queue(); coopAV.release_locked_hsa_queue(); return hipErrorLaunchFailure; } size_t impCoopArg = 1; void* impCoopParams[1]; impCoopParams[0] = &impCoopArg; // launch the main kernel in the cooperative queue result = ihipModuleLaunchKernel(tls, kd, gridDim.x * blockDimX.x, gridDim.y * blockDimX.y, gridDim.z * blockDimX.z, blockDimX.x, blockDimX.y, blockDimX.z, sharedMemBytes, stream, kernelParams, nullptr, nullptr, nullptr, 0, true, impCoopParams, &coopAV); coopAV.release_locked_hsa_queue(); stream->criticalData()._av.release_locked_hsa_queue(); // this stream will wait until the cooperative queue completes its operations hc::completion_future cooperativeCF; if (!coopAV.get_is_empty()) { cooperativeCF = coopAV.create_marker(hc::accelerator_scope); streamCrit->_av.create_blocking_marker(cooperativeCF, hc::accelerator_scope); } stream->criticalData().unlock(); return result; #else return hipErrorInvalidConfiguration; #endif } __attribute__((visibility("default"))) hipError_t ihipLaunchCooperativeKernelMultiDevice(hipLaunchParams* launchParamsList, int numDevices, unsigned int flags, hip_impl::program_state& ps) { #if (__hcc_workweek__ >= 20093) hipError_t result; if (numDevices > g_deviceCnt || launchParamsList == nullptr || numDevices > MAX_COOPERATIVE_GPUs) { return hipErrorInvalidValue; } vector<hipStream_t> streams; vector<uint64_t> deviceIDs; // check to see if we have valid distinct streams/devices, if cooperative multi device // launch is supported and if grid/block dimensions are valid for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; if (lp.stream == nullptr){ return hipErrorInvalidResourceHandle; } if (find(streams.begin(), streams.end(), lp.stream) == streams.end()) { streams.push_back(lp.stream); } else { return hipErrorInvalidDevice; } const ihipDevice_t* currentDevice = lp.stream->getDevice(); if (find(deviceIDs.begin(), deviceIDs.end(), currentDevice->_deviceId) == deviceIDs.end()) { deviceIDs.push_back(currentDevice->_deviceId); } else { return hipErrorInvalidDevice; } if (!currentDevice->_props.cooperativeMultiDeviceLaunch) { return hipErrorInvalidConfiguration; } if (lp.gridDim.x == 0 || lp.gridDim.y == 0 || lp.gridDim.z == 0 || lp.blockDim.x == 0 || lp.blockDim.y == 0 || lp.blockDim.z == 0 || lp.blockDim.x * lp.blockDim.y * lp.blockDim.z > currentDevice->_props.maxThreadsPerBlock){ return hipErrorInvalidConfiguration; } } vector<hipFunction_t> gwsKds; vector<hipFunction_t> kds; GET_TLS(); // prepare all kernel descriptors for initializing the GWS and the main kernels per device for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; gwsKds.push_back(ps.kernel_descriptor(reinterpret_cast<std::uintptr_t>(&init_gws), hip_impl::target_agent(lp.stream))); if (gwsKds[i] == nullptr) { return hipErrorInvalidValue; } hip_impl::kernargs_size_align gwsKargs = ps.get_kernargs_size_align( reinterpret_cast<std::uintptr_t>(&init_gws)); gwsKds[i]->_kernarg_layout = *reinterpret_cast<const std::vector<std::pair<std::size_t, std::size_t>>*>( gwsKargs.getHandle()); kds.push_back(ps.kernel_descriptor(reinterpret_cast<std::uintptr_t>(lp.func), hip_impl::target_agent(lp.stream))); if (kds[i] == nullptr) { return hipErrorInvalidValue; } hip_impl::kernargs_size_align kargs = 
ps.get_kernargs_size_align( reinterpret_cast<std::uintptr_t>(lp.func)); kds[i]->_kernarg_layout = *reinterpret_cast<const std::vector<std::pair<std::size_t, std::size_t>>*>( kargs.getHandle()); uint32_t numBlocksPerSm = 0; result = ihipOccupancyMaxActiveBlocksPerMultiprocessor(tls, &numBlocksPerSm, kds[i], lp.blockDim.x * lp.blockDim.y * lp.blockDim.z, lp.sharedMem); if (result != hipSuccess) { return hipErrorLaunchFailure; } int maxActiveBlocks = numBlocksPerSm * lp.stream->getDevice()->_props.multiProcessorCount; //check to see if the workload fits on the GPU if (lp.gridDim.x * lp.gridDim.y * lp.gridDim.z > maxActiveBlocks) { return hipErrorCooperativeLaunchTooLarge; } } vector<hc::accelerator_view> coopAVs; // create cooperative accelerated views for launching gws and main kernels on each device for (int i = 0; i < numDevices; ++i) { hc::accelerator acc = launchParamsList[i].stream->getDevice()->_acc; coopAVs.push_back(acc.create_cooperative_view()); } mg_sync *mg_sync_ptr = 0; vector<mg_info *> mg_info_ptr; result = hip_internal::ihipHostMalloc(tls, (void **)&mg_sync_ptr, sizeof(mg_sync), hipHostMallocDefault); if (result != hipSuccess) { return hipErrorInvalidValue; } mg_sync_ptr->w0 = 0; mg_sync_ptr->w1 = 0; uint all_sum = 0; for (int i = 0; i < numDevices; ++i) { mg_info *mg_info_temp = nullptr; result = hip_internal::ihipHostMalloc(tls, (void **)&mg_info_temp, sizeof(mg_info), hipHostMallocDefault); if (result != hipSuccess) { hip_internal::ihipHostFree(tls, mg_sync_ptr); for (int j = 0; j < i; ++j) { hip_internal::ihipHostFree(tls, mg_info_ptr[j]); } return hipErrorInvalidValue; } mg_info_ptr.push_back(mg_info_temp); // calculate the sum of sizes of all grids const hipLaunchParams& lp = launchParamsList[i]; all_sum += lp.blockDim.x * lp.blockDim.y * lp.blockDim.z * lp.gridDim.x * lp.gridDim.y * lp.gridDim.z; } // lock all streams before launching the blit kernels for initializing the GWS and main kernels to each device for (int i = 0; i < numDevices; ++i) { LockedAccessor_StreamCrit_t streamCrit(launchParamsList[i].stream->criticalData(), false); hc::completion_future streamCF; if (!streamCrit->_av.get_is_empty()) { streamCF = streamCrit->_av.create_marker(hc::accelerator_scope); coopAVs[i].create_blocking_marker(streamCF, hc::accelerator_scope); } streamCrit->_av.acquire_locked_hsa_queue(); coopAVs[i].acquire_locked_hsa_queue(); } // launch the init_gws kernel to initialize the GWS for each device for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; void *gwsKernelParam[1]; uint nwm1 = (lp.gridDim.x * lp.gridDim.y * lp.gridDim.z) - 1; gwsKernelParam[0] = &nwm1; result = ihipModuleLaunchKernel(tls, gwsKds[i], 1, 1, 1, 1, 1, 1, 0, lp.stream, gwsKernelParam, nullptr, nullptr, nullptr, 0, true, nullptr, &coopAVs[i]); if (result != hipSuccess) { // release every device's cooperative queue, not just the failing one for (int j = 0; j < numDevices; ++j) { launchParamsList[j].stream->criticalData().unlock(); launchParamsList[j].stream->criticalData()._av.release_locked_hsa_queue(); coopAVs[j].release_locked_hsa_queue(); } hip_internal::ihipHostFree(tls, mg_sync_ptr); for (int j = 0; j < numDevices; ++j) { hip_internal::ihipHostFree(tls, mg_info_ptr[j]); } return hipErrorLaunchFailure; } } void* impCoopParams[1]; ulong prev_sum = 0; size_t globalWorkSizeX = 0, globalWorkSizeY = 0, globalWorkSizeZ = 0; // launch the main kernels for each device for (int i = 0; i < numDevices; ++i) { const hipLaunchParams& lp = launchParamsList[i]; //initialize and setup the implicit kernel argument for multi-grid sync mg_info_ptr[i]->mgs =
mg_sync_ptr; mg_info_ptr[i]->grid_id = i; mg_info_ptr[i]->num_grids = numDevices; mg_info_ptr[i]->all_sum = all_sum; mg_info_ptr[i]->prev_sum = prev_sum; prev_sum += lp.blockDim.x * lp.blockDim.y * lp.blockDim.z * lp.gridDim.x * lp.gridDim.y * lp.gridDim.z; impCoopParams[0] = &mg_info_ptr[i]; globalWorkSizeX = (size_t)lp.gridDim.x * (size_t)lp.blockDim.x; globalWorkSizeY = (size_t)lp.gridDim.y * (size_t)lp.blockDim.y; globalWorkSizeZ = (size_t)lp.gridDim.z * (size_t)lp.blockDim.z; if(globalWorkSizeX > UINT32_MAX || globalWorkSizeY > UINT32_MAX || globalWorkSizeZ > UINT32_MAX) { return hipErrorInvalidConfiguration; } result = ihipModuleLaunchKernel(tls, kds[i], lp.gridDim.x * lp.blockDim.x, lp.gridDim.y * lp.blockDim.y, lp.gridDim.z * lp.blockDim.z, lp.blockDim.x, lp.blockDim.y, lp.blockDim.z, lp.sharedMem, lp.stream, lp.args, nullptr, nullptr, nullptr, 0, true, impCoopParams, &coopAVs[i]); if (result != hipSuccess) { // release every device's cooperative queue, not just the failing one for (int j = 0; j < numDevices; ++j) { launchParamsList[j].stream->criticalData().unlock(); launchParamsList[j].stream->criticalData()._av.release_locked_hsa_queue(); coopAVs[j].release_locked_hsa_queue(); } hip_internal::ihipHostFree(tls, mg_sync_ptr); for (int j = 0; j < numDevices; ++j) { hip_internal::ihipHostFree(tls, mg_info_ptr[j]); } return hipErrorLaunchFailure; } } // unlock all streams for (int i = 0; i < numDevices; ++i) { coopAVs[i].release_locked_hsa_queue(); launchParamsList[i].stream->criticalData()._av.release_locked_hsa_queue(); hc::completion_future cooperativeCF; if (!coopAVs[i].get_is_empty()) { cooperativeCF = coopAVs[i].create_marker(hc::accelerator_scope); launchParamsList[i].stream->criticalData()._av.create_blocking_marker( cooperativeCF, hc::accelerator_scope); } launchParamsList[i].stream->criticalData().unlock(); } hip_internal::ihipHostFree(tls, mg_sync_ptr); for (int j = 0; j < numDevices; ++j) { hip_internal::ihipHostFree(tls, mg_info_ptr[j]); } return result; #else return hipErrorInvalidConfiguration; #endif } namespace hip_impl { hsa_executable_t executable_for(hipModule_t hmod) { return hmod->executable; } const char* hash_for(hipModule_t hmod) { return hmod->hash.c_str(); } hsa_agent_t this_agent() { GET_TLS(); auto ctx = ihipGetTlsDefaultCtx(); if (!ctx) throw runtime_error{"No active HIP context."}; auto device = ctx->getDevice(); if (!device) throw runtime_error{"No device available for HIP."}; ihipDevice_t* currentDevice = ihipGetDevice(device->_deviceId); if (!currentDevice) throw runtime_error{"No active device for HIP."}; return currentDevice->_hsaAgent; } struct Agent_global { Agent_global() : name(nullptr), address(nullptr), byte_cnt(0) {} Agent_global(const char* name, hipDeviceptr_t address, uint32_t byte_cnt) : name(nullptr), address(address), byte_cnt(byte_cnt) { if (name) this->name = strdup(name); } Agent_global& operator=(Agent_global&& t) { if (this == &t) return *this; if (name) free(name); name = t.name; address = t.address; byte_cnt = t.byte_cnt; t.name = nullptr; t.address = nullptr; t.byte_cnt = 0; return *this; } Agent_global(Agent_global&& t) : name(nullptr), address(nullptr), byte_cnt(0) { *this = std::move(t); } // not needed, delete them to prevent bugs Agent_global(const Agent_global&) = delete; Agent_global& operator=(Agent_global& t) = delete; ~Agent_global() { if (name) free(name); } char* name; hipDeviceptr_t address; uint32_t byte_cnt; }; template<typename ForwardIterator> std::pair<hipDeviceptr_t, std::size_t> read_global_description( ForwardIterator f, ForwardIterator l, const char* name) { const auto it =
std::find_if(f, l, [=](const Agent_global& x) { return strcmp(x.name, name) == 0; }); return it == l ? std::make_pair(nullptr, 0u) : std::make_pair(it->address, it->byte_cnt); } std::vector<Agent_global> read_agent_globals(hsa_agent_t agent, hsa_executable_t executable); class agent_globals_impl { private: std::pair< std::mutex, std::unordered_map< std::string, std::vector<Agent_global>>> globals_from_module; std::unordered_map< hsa_agent_t, std::pair< std::once_flag, std::vector<Agent_global>>> globals_from_process; public: hipError_t read_agent_global_from_module(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name) { // the key of the map would be the hash of the code object associated with the // hipModule_t instance std::string key(hash_for(hmod)); if (globals_from_module.second.count(key) == 0) { std::lock_guard<std::mutex> lck{globals_from_module.first}; if (globals_from_module.second.count(key) == 0) { globals_from_module.second.emplace( key, read_agent_globals(this_agent(), executable_for(hmod))); } } const auto it0 = globals_from_module.second.find(key); if (it0 == globals_from_module.second.cend()) { hip_throw( std::runtime_error{"agent_globals data structure corrupted."}); } std::tie(*dptr, *bytes) = read_global_description(it0->second.cbegin(), it0->second.cend(), name); // HACK for SWDEV-173477 // // For code objects with global symbols of length 0, ROCR runtime's fix // may not be working correctly. Therefore the // result from read_agent_globals() can't be trusted entirely. // // As a workaround to tame applications which depend on the existence of // global symbols with length 0, always return hipSuccess here. // // This behavior shall be reverted once ROCR runtime has been fixed to // address SWDEV-173477 and SWDEV-190701 //return *dptr ? hipSuccess : hipErrorNotFound; return hipSuccess; } hipError_t read_agent_global_from_process(hipDeviceptr_t* dptr, size_t* bytes, const char* name) { auto agent = this_agent(); std::call_once(globals_from_process[agent].first, [this](hsa_agent_t aa) { std::vector<Agent_global> tmp0; for (auto&& executable : hip_impl::get_program_state().impl->get_executables(aa)) { auto tmp1 = read_agent_globals(aa, executable); tmp0.insert(tmp0.end(), make_move_iterator(tmp1.begin()), make_move_iterator(tmp1.end())); } globals_from_process[aa].second = move(tmp0); }, agent); const auto it = globals_from_process.find(agent); if (it == globals_from_process.cend()) return hipErrorNotInitialized; std::tie(*dptr, *bytes) = read_global_description(it->second.second.cbegin(), it->second.second.cend(), name); return *dptr ? hipSuccess : hipErrorNotFound; } }; agent_globals::agent_globals() : impl(new agent_globals_impl()) { if (!impl) hip_throw( std::runtime_error{"Error when constructing agent global data structures."}); } agent_globals::~agent_globals() { delete impl; } hipError_t agent_globals::read_agent_global_from_module(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name) { return impl->read_agent_global_from_module(dptr, bytes, hmod, name); } hipError_t agent_globals::read_agent_global_from_process(hipDeviceptr_t* dptr, size_t* bytes, const char* name) { return impl->read_agent_global_from_process(dptr, bytes, name); } } // Namespace hip_impl.
hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name) { HIP_INIT_API(hipModuleGetGlobal, dptr, bytes, hmod, name); if (!dptr || !bytes || !hmod) return hipErrorInvalidValue; if (!name) return hipErrorNotInitialized; return hip_impl::get_agent_globals().read_agent_global_from_module(dptr, bytes, hmod, name); } namespace { inline void track(const hip_impl::Agent_global& x, hsa_agent_t agent) { GET_TLS(); tprintf(DB_MEM, " add variable '%s' with ptr=%p size=%u to tracker\n", x.name, x.address, x.byte_cnt); int deviceIndex =0; for ( deviceIndex = 0; deviceIndex < g_deviceCnt; deviceIndex++) { if(g_allAgents[deviceIndex] == agent) break; } auto device = ihipGetDevice(deviceIndex - 1); hc::AmPointerInfo ptr_info(nullptr, x.address, x.address, x.byte_cnt, device->_acc, true, false); hc::am_memtracker_add(x.address, ptr_info); #if USE_APP_PTR_FOR_CTX hc::am_memtracker_update(x.address, device->_deviceId, 0u, ihipGetTlsDefaultCtx()); #else hc::am_memtracker_update(x.address, device->_deviceId, 0u); #endif } template <typename Container = vector<hip_impl::Agent_global>> inline hsa_status_t copy_agent_global_variables(hsa_executable_t, hsa_agent_t agent, hsa_executable_symbol_t x, void* out) { using namespace hip_impl; assert(out); hsa_symbol_kind_t t = {}; hsa_executable_symbol_get_info(x, HSA_EXECUTABLE_SYMBOL_INFO_TYPE, &t); if (t == HSA_SYMBOL_KIND_VARIABLE) { hip_impl::Agent_global tmp(name(x).c_str(), address(x), size(x)); static_cast<Container*>(out)->push_back(std::move(tmp)); track(static_cast<Container*>(out)->back(),agent); } return HSA_STATUS_SUCCESS; } inline hsa_status_t remove_agent_global_variables(hsa_executable_t, hsa_agent_t agent, hsa_executable_symbol_t x, void* unused) { hsa_symbol_kind_t t = {}; hsa_executable_symbol_get_info(x, HSA_EXECUTABLE_SYMBOL_INFO_TYPE, &t); if (t == HSA_SYMBOL_KIND_VARIABLE) { hc::am_memtracker_remove(hip_impl::address(x)); } return HSA_STATUS_SUCCESS; } hsa_executable_symbol_t find_kernel_by_name(hsa_executable_t executable, const char* kname, hsa_agent_t* agent = nullptr) { using namespace hip_impl; pair<const char*, hsa_executable_symbol_t> r{kname, {}}; hsa_executable_iterate_agent_symbols( executable, agent ? *agent : this_agent(), [](hsa_executable_t, hsa_agent_t, hsa_executable_symbol_t x, void* s) { auto p = static_cast<pair<const char*, hsa_executable_symbol_t>*>(s); if (type(x) != HSA_SYMBOL_KIND_KERNEL) { return HSA_STATUS_SUCCESS; } if (name(x) != p->first) return HSA_STATUS_SUCCESS; p->second = x; return HSA_STATUS_INFO_BREAK; }, &r); return r.second; } string read_elf_file_as_string(const void* file) { // Precondition: file points to an ELF image that was BITWISE loaded // into process accessible memory, and not one loaded by // the loader. This is because in the latter case // alignment may differ, which will break the size // computation. // the image is Elf64, and matches endianness i.e. it is // Little Endian. if (!file) return {}; auto h = static_cast<const ELFIO::Elf64_Ehdr*>(file); auto s = static_cast<const char*>(file); // This assumes the common case of SHT being the last part of the ELF. 
auto sz = sizeof(ELFIO::Elf64_Ehdr) + h->e_shoff + h->e_shentsize * h->e_shnum; return string{s, s + sz}; } string code_object_blob_for_agent(const void* maybe_bundled_code, hsa_agent_t agent) { using namespace hip_impl; if (!maybe_bundled_code) return {}; Bundled_code_header tmp{maybe_bundled_code}; if (!valid(tmp)) return {}; const auto agent_isa = isa(agent); const auto it = find_if(bundles(tmp).cbegin(), bundles(tmp).cend(), [=](const Bundled_code& x) { return agent_isa == triple_to_hsa_isa(x.triple); }); if (it == bundles(tmp).cend()) return {}; return string{it->blob.cbegin(), it->blob.cend()}; } } // Unnamed namespace. namespace hip_impl { vector<Agent_global> read_agent_globals(hsa_agent_t agent, hsa_executable_t executable) { vector<Agent_global> r; hsa_executable_iterate_agent_symbols( executable, agent, copy_agent_global_variables, &r); return r; } void remove_agent_globals_from_tracker(hsa_agent_t agent, hsa_executable_t executable) { hsa_executable_iterate_agent_symbols(executable, agent, remove_agent_global_variables, NULL); } } // Namespace hip_impl. hipError_t hipModuleUnload(hipModule_t hmod) { HIP_INIT_API(hipModuleUnload, hmod); // TODO - improve this synchronization so it is thread-safe. // Currently we wait for all inflight activity to complete, but don't prevent another // thread from launching new kernels before we finish this operation. ihipSynchronize(tls); // deleting ihipModule_t does not remove agent globals from hc_am memtracker hip_impl::remove_agent_globals_from_tracker(hip_impl::this_agent(), hip_impl::executable_for(hmod)); delete hmod; // The ihipModule_t dtor will clean everything up. hmod = nullptr; return ihipLogStatus(hipSuccess); } hipError_t ihipModuleGetFunction(TlsData *tls, hipFunction_t* func, hipModule_t hmod, const char* name, hsa_agent_t *agent = nullptr) { using namespace hip_impl; if (!func || !name) return hipErrorInvalidValue; auto ctx = ihipGetTlsDefaultCtx(); if (!ctx) return hipErrorInvalidContext; *func = new ihipModuleSymbol_t; if (!*func) return hipErrorInvalidValue; std::string name_str(name); auto kernel = find_kernel_by_name(hmod->executable, name_str.c_str(), agent); if (kernel.handle == 0u) { name_str.append(".kd"); kernel = find_kernel_by_name(hmod->executable, name_str.c_str(), agent); } if (kernel.handle == 0u) return hipErrorNotFound; // TODO: refactor the whole ihipThisThat, which is a mess and yields the // below, due to hipFunction_t being a pointer to ihipModuleSymbol_t. func[0][0] = *static_cast<hipFunction_t>( Kernel_descriptor{kernel_object(kernel), name_str, hmod->kernargs[name_str]}); return hipSuccess; } // Get kernel for the current hsa agent. hipError_t hipModuleGetFunction(hipFunction_t* hfunc, hipModule_t hmod, const char* name) { HIP_INIT_API(hipModuleGetFunction, hfunc, hmod, name); return ihipLogStatus(ihipModuleGetFunction(tls, hfunc, hmod, name)); } // Get kernel for the given hsa agent. Internal use only.
hipError_t hipModuleGetFunctionEx(hipFunction_t* hfunc, hipModule_t hmod, const char* name, hsa_agent_t *agent) { HIP_INIT_API(hipModuleGetFunctionEx, hfunc, hmod, name, agent); return ihipLogStatus(ihipModuleGetFunction(tls, hfunc, hmod, name, agent)); } namespace { const amd_kernel_code_v3_t *header_v3(const ihipModuleSymbol_t& kd) { return reinterpret_cast<const amd_kernel_code_v3_t*>(kd._header); } hipFuncAttributes make_function_attributes(TlsData *tls, ihipModuleSymbol_t& kd) { hipFuncAttributes r{}; hipDeviceProp_t prop{}; hipGetDeviceProperties(&prop, ihipGetTlsDefaultCtx()->getDevice()->_deviceId); // TODO: at the moment there is no way to query the count of registers // available per CU, therefore we hardcode it to 64 KiRegisters. prop.regsPerBlock = prop.regsPerBlock ? prop.regsPerBlock : 64 * 1024; if (kd._is_code_object_v3) { r.binaryVersion = 0; // FIXME: should it be the ISA version or code // object format version? r.localSizeBytes = header_v3(kd)->private_segment_fixed_size; r.sharedSizeBytes = header_v3(kd)->group_segment_fixed_size; } else { r.localSizeBytes = kd._header->workitem_private_segment_byte_size; r.sharedSizeBytes = kd._header->workgroup_group_segment_byte_size; r.binaryVersion = kd._header->amd_machine_version_major * 10 + kd._header->amd_machine_version_minor; } r.maxDynamicSharedSizeBytes = prop.sharedMemPerBlock - r.sharedSizeBytes; size_t usedVGPRS = 0; size_t usedSGPRS = 0; size_t usedLDS = 0; getGprsLdsUsage(&kd, &usedVGPRS, &usedSGPRS, &usedLDS); r.numRegs = usedVGPRS; size_t wavefrontSize = prop.warpSize; size_t maxWavefrontsPerBlock = prop.maxThreadsPerBlock / wavefrontSize; size_t maxWavefrontsPerCU = min(prop.maxThreadsPerMultiProcessor / wavefrontSize, 32); const size_t numSIMD = 4; const size_t maxWavesPerSimd = maxWavefrontsPerCU / numSIMD; size_t maxWaves = 0; for (int i = 0; i < maxWavefrontsPerBlock; i++) { size_t wavefronts = i + 1; if (usedVGPRS > 0) { size_t availableVGPRs = (prop.regsPerBlock / wavefrontSize / numSIMD); size_t vgprs_alu_occupancy = numSIMD * std::min(maxWavesPerSimd, availableVGPRs / usedVGPRS); // Calculate blocks occupancy per CU based on VGPR usage if (vgprs_alu_occupancy < wavefronts) break; } if (usedSGPRS > 0) { const size_t availableSGPRs = (prop.gcnArch < 800) ? 512 : 800; size_t sgprs_alu_occupancy = numSIMD * ((usedSGPRS == 0) ? maxWavesPerSimd : std::min(maxWavesPerSimd, availableSGPRs / usedSGPRS)); // Calculate blocks occupancy per CU based on SGPR usage if (sgprs_alu_occupancy < wavefronts) break; } maxWaves = wavefronts; } r.maxThreadsPerBlock = maxWaves * wavefrontSize; r.ptxVersion = prop.major * 10 + prop.minor; // HIP currently presents itself as PTX 3.0. return r; } } // Unnamed namespace. 
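// Worked example (illustration only, nothing queried from a real device) of
// the VGPR-occupancy arithmetic used in the loop above, assuming the 64 Ki
// register fallback, wave size 64, 4 SIMDs per CU, the 32-wave CU cap, and a
// hypothetical kernel that uses 64 VGPRs with a 256-thread block:
//   availableVGPRs = 65536 / 64 / 4        = 256 VGPRs per SIMD
//   wavesPerSimd   = min(32 / 4, 256 / 64) = min(8, 4) = 4
//   vgprOccupancy  = 4 SIMDs * 4 waves     = 16 wavefronts per CU
//   numWavefronts  = 256 threads / 64      = 4 waves per block
//   blocksPerCU    = 16 / 4                = 4 active blocks per CU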
hipError_t hipFuncGetAttributes(hipFuncAttributes* attr, const void* func) { HIP_INIT_API(hipFuncGetAttributes, attr, func); using namespace hip_impl; if (!attr) return ihipLogStatus(hipErrorInvalidValue); if (!func) return ihipLogStatus(hipErrorInvalidDeviceFunction); auto agent = this_agent(); auto kd = get_program_state().kernel_descriptor(reinterpret_cast<uintptr_t>(func), agent); if (!kd->_header) throw runtime_error{"Ill-formed Kernel_descriptor."}; *attr = make_function_attributes(tls, *kd); return ihipLogStatus(hipSuccess); } hipError_t hipFuncGetAttribute(int* value, hipFunction_attribute attrib, hipFunction_t hfunc) { HIP_INIT_API(hipFuncGetAttribute, value, attrib, hfunc); using namespace hip_impl; hipError_t retVal = hipSuccess; if (!value) return ihipLogStatus(hipErrorInvalidValue); hipFuncAttributes attr{}; attr = make_function_attributes(tls, *hfunc); switch(attrib) { case HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: *value = (int) attr.sharedSizeBytes; break; case HIP_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: *value = attr.maxThreadsPerBlock; break; case HIP_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: *value = (int) attr.constSizeBytes; break; case HIP_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: *value = (int) attr.localSizeBytes; break; case HIP_FUNC_ATTRIBUTE_NUM_REGS: *value = attr.numRegs; break; case HIP_FUNC_ATTRIBUTE_PTX_VERSION: *value = attr.ptxVersion; break; case HIP_FUNC_ATTRIBUTE_BINARY_VERSION: *value = attr.binaryVersion; break; case HIP_FUNC_ATTRIBUTE_CACHE_MODE_CA: *value = attr.cacheModeCA; break; case HIP_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: *value = attr.maxDynamicSharedSizeBytes; break; case HIP_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: *value = attr.preferredShmemCarveout; break; default: retVal = hipErrorInvalidValue; } return ihipLogStatus(retVal); } hipError_t ihipModuleLoadData(TlsData *tls, hipModule_t* module, const void* image) { using namespace hip_impl; if (!module) return hipErrorInvalidValue; *module = new ihipModule_t; auto ctx = ihipGetTlsDefaultCtx(); if (!ctx) return hipErrorInvalidContext; // try extracting code object from image as fatbin. char name[64] = {}; hsa_agent_get_info(this_agent(), HSA_AGENT_INFO_NAME, name); if (auto *code_obj = __hipExtractCodeObjectFromFatBinary(image, name)) image = code_obj; hsa_executable_create_alt(HSA_PROFILE_FULL, HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT, nullptr, &(*module)->executable); auto tmp = code_object_blob_for_agent(image, this_agent()); auto content = tmp.empty() ? read_elf_file_as_string(image) : tmp; (*module)->executable = get_program_state().load_executable( content.data(), content.size(), (*module)->executable, this_agent()); program_state_impl::read_kernarg_metadata(content, (*module)->kernargs); // compute the hash of the code object (*module)->hash = checksum(content.length(), content.data()); return (*module)->executable.handle ? 
hipSuccess : hipErrorUnknown; } hipError_t hipModuleLoadData(hipModule_t* module, const void* image) { HIP_INIT_API(hipModuleLoadData, module, image); return ihipLogStatus(ihipModuleLoadData(tls,module,image)); } hipError_t hipModuleLoad(hipModule_t* module, const char* fname) { HIP_INIT_API(hipModuleLoad, module, fname); if (!fname) return ihipLogStatus(hipErrorInvalidValue); ifstream file{fname}; if (!file.is_open()) return ihipLogStatus(hipErrorFileNotFound); vector<char> tmp{istreambuf_iterator<char>{file}, istreambuf_iterator<char>{}}; return ihipLogStatus(ihipModuleLoadData(tls, module, tmp.data())); } hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues) { HIP_INIT_API(hipModuleLoadDataEx, module, image, numOptions, options, optionValues); return ihipLogStatus(ihipModuleLoadData(tls, module, image)); } hipError_t hipModuleGetTexRef(textureReference** texRef, hipModule_t hmod, const char* name) { using namespace hip_impl; HIP_INIT_API(hipModuleGetTexRef, texRef, hmod, name); hipError_t ret = hipErrorNotFound; if (!texRef) return ihipLogStatus(hipErrorInvalidValue); if (!hmod || !name) return ihipLogStatus(hipErrorNotInitialized); auto addr = get_program_state().global_addr_by_name(name); if (addr == nullptr) return ihipLogStatus(hipErrorInvalidValue); *texRef = reinterpret_cast<textureReference*>(addr); return ihipLogStatus(hipSuccess); } hipError_t ihipOccupancyMaxPotentialBlockSize(TlsData *tls, uint32_t* gridSize, uint32_t* blockSize, hipFunction_t f, size_t dynSharedMemPerBlk, uint32_t blockSizeLimit) { using namespace hip_impl; auto ctx = ihipGetTlsDefaultCtx(); if (ctx == nullptr) { return hipErrorInvalidDevice; } hipDeviceProp_t prop{}; ihipGetDeviceProperties(&prop, ihipGetTlsDefaultCtx()->getDevice()->_deviceId); prop.regsPerBlock = prop.regsPerBlock ? prop.regsPerBlock : 64 * 1024; size_t usedVGPRS = 0; size_t usedSGPRS = 0; size_t usedLDS = 0; getGprsLdsUsage(f, &usedVGPRS, &usedSGPRS, &usedLDS); // try different workgroup sizes to find the maximum potential occupancy // based on the usage of VGPRs and LDS size_t wavefrontSize = prop.warpSize; size_t maxWavefrontsPerBlock = prop.maxThreadsPerBlock / wavefrontSize; // Due to SPI and private memory limitations, the max of wavefronts per CU is 32 size_t maxWavefrontsPerCU = min(prop.maxThreadsPerMultiProcessor / wavefrontSize, 32); const size_t numSIMD = 4; size_t maxActivWaves = 0; size_t maxWavefronts = 0; for (int i = 0; i < maxWavefrontsPerBlock; i++) { size_t wavefrontsPerWG = i + 1; // workgroup per CU is 40 for WG size of 1 wavefront; otherwise it is 16 size_t maxWorkgroupPerCU = (wavefrontsPerWG == 1) ? 40 : 16; size_t maxWavesWGLimited = min(wavefrontsPerWG * maxWorkgroupPerCU, maxWavefrontsPerCU); // Compute VGPR limited wavefronts per block size_t wavefrontsVGPRS; if (usedVGPRS == 0) { wavefrontsVGPRS = maxWavesWGLimited; } else { // find how many VGPRs are available for each SIMD size_t numVGPRsPerSIMD = (prop.regsPerBlock / wavefrontSize / numSIMD); wavefrontsVGPRS = (numVGPRsPerSIMD / usedVGPRS) * numSIMD; } size_t maxWavesVGPRSLimited = 0; if (wavefrontsVGPRS > maxWavesWGLimited) { maxWavesVGPRSLimited = maxWavesWGLimited; } else { maxWavesVGPRSLimited = (wavefrontsVGPRS / wavefrontsPerWG) * wavefrontsPerWG; } // Compute SGPR limited wavefronts per block size_t wavefrontsSGPRS; if (usedSGPRS == 0) { wavefrontsSGPRS = maxWavesWGLimited; } else { const size_t numSGPRsPerSIMD = (prop.gcnArch < 800) ?
512 : 800; wavefrontsSGPRS = (numSGPRsPerSIMD / usedSGPRS) * numSIMD; } size_t maxWavesSGPRSLimited = 0; if (wavefrontsSGPRS > maxWavesWGLimited) { maxWavesSGPRSLimited = maxWavesWGLimited; } else { maxWavesSGPRSLimited = (wavefrontsSGPRS / wavefrontsPerWG) * wavefrontsPerWG; } // Compute LDS limited wavefronts per block size_t wavefrontsLDS; if (usedLDS == 0) { wavefrontsLDS = maxWorkgroupPerCU * wavefrontsPerWG; } else { size_t availableSharedMemPerCU = prop.maxSharedMemoryPerMultiProcessor; size_t workgroupPerCU = availableSharedMemPerCU / (usedLDS + dynSharedMemPerBlk); wavefrontsLDS = min(workgroupPerCU, maxWorkgroupPerCU) * wavefrontsPerWG; } size_t maxWavesLDSLimited = min(wavefrontsLDS, maxWavefrontsPerCU); size_t activeWavefronts = 0; size_t tmp_min = (size_t)min(maxWavesLDSLimited, maxWavesWGLimited); tmp_min = min(maxWavesSGPRSLimited, tmp_min); activeWavefronts = min(maxWavesVGPRSLimited, tmp_min); if (maxActivWaves <= activeWavefronts) { maxActivWaves = activeWavefronts; maxWavefronts = wavefrontsPerWG; } } // determine the grid and block sizes for maximum potential occupancy size_t maxThreadsCnt = prop.maxThreadsPerMultiProcessor*prop.multiProcessorCount; if (blockSizeLimit > 0) { maxThreadsCnt = min(maxThreadsCnt, blockSizeLimit); } *blockSize = maxWavefronts * wavefrontSize; *gridSize = min((maxThreadsCnt + *blockSize - 1) / *blockSize, prop.multiProcessorCount); return hipSuccess; } hipError_t hipOccupancyMaxPotentialBlockSize(uint32_t* gridSize, uint32_t* blockSize, hipFunction_t f, size_t dynSharedMemPerBlk, uint32_t blockSizeLimit) { HIP_INIT_API(hipOccupancyMaxPotentialBlockSize, gridSize, blockSize, f, dynSharedMemPerBlk, blockSizeLimit); return ihipLogStatus(ihipOccupancyMaxPotentialBlockSize(tls, gridSize, blockSize, f, dynSharedMemPerBlk, blockSizeLimit)); } hipError_t hipOccupancyMaxActiveBlocksPerMultiprocessor( uint32_t* numBlocks, hipFunction_t f, uint32_t blockSize, size_t dynSharedMemPerBlk) { HIP_INIT_API(hipOccupancyMaxActiveBlocksPerMultiprocessor, numBlocks, f, blockSize, dynSharedMemPerBlk); return ihipLogStatus(ihipOccupancyMaxActiveBlocksPerMultiprocessor( tls, numBlocks, f, blockSize, dynSharedMemPerBlk)); } hipError_t hipDrvOccupancyMaxActiveBlocksPerMultiprocessor( int* numBlocks, hipFunction_t f, int blockSize, size_t dynSharedMemPerBlk) { HIP_INIT_API(hipDrvOccupancyMaxActiveBlocksPerMultiprocessor, numBlocks, f, blockSize, dynSharedMemPerBlk); return ihipLogStatus(ihipOccupancyMaxActiveBlocksPerMultiprocessor( tls, (uint32_t*) numBlocks, f, blockSize, dynSharedMemPerBlk)); } hipError_t hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( uint32_t* numBlocks, hipFunction_t f, uint32_t blockSize, size_t dynSharedMemPerBlk, unsigned int flags) { HIP_INIT_API(hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, numBlocks, f, blockSize, dynSharedMemPerBlk, flags); return ihipLogStatus(ihipOccupancyMaxActiveBlocksPerMultiprocessor( tls, numBlocks, f, blockSize, dynSharedMemPerBlk)); } hipError_t hipDrvOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( int* numBlocks, hipFunction_t f, int blockSize, size_t dynSharedMemPerBlk, unsigned int flags) { HIP_INIT_API(hipDrvOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, numBlocks, f, blockSize, dynSharedMemPerBlk, flags); return ihipLogStatus(ihipOccupancyMaxActiveBlocksPerMultiprocessor( tls, (uint32_t*) numBlocks, f, blockSize, dynSharedMemPerBlk)); } hipError_t hipLaunchKernel( const void* func_addr, dim3 numBlocks, dim3 dimBlocks, void** args, size_t sharedMemBytes, hipStream_t stream) { 
HIP_INIT_API(hipLaunchKernel,func_addr,numBlocks,dimBlocks,args,sharedMemBytes,stream); hipFunction_t kd = hip_impl::get_program_state().kernel_descriptor((std::uintptr_t)func_addr, hip_impl::target_agent(stream)); return hipModuleLaunchKernel(kd, numBlocks.x, numBlocks.y, numBlocks.z, dimBlocks.x, dimBlocks.y, dimBlocks.z, sharedMemBytes, stream, args, nullptr); }
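For reference, this is how the module path above is typically driven from application code; a minimal sketch in which "kernel.co", "vectorAdd", and "someGlobal" are placeholder names, not names from the source:

// Hypothetical caller of the module APIs implemented above.
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    hipModule_t module = nullptr;
    hipFunction_t kernel = nullptr;

    if (hipModuleLoad(&module, "kernel.co") != hipSuccess) return 1;
    if (hipModuleGetFunction(&kernel, module, "vectorAdd") != hipSuccess) return 1;

    // Look up a device global by name. Note that the implementation above
    // currently returns hipSuccess even for missing symbols (the SWDEV-173477
    // workaround), so the returned pointer must be checked as well.
    hipDeviceptr_t dptr = nullptr;
    size_t bytes = 0;
    if (hipModuleGetGlobal(&dptr, &bytes, module, "someGlobal") == hipSuccess && dptr) {
        std::printf("someGlobal is %zu bytes\n", bytes);
    }

    // Grid of 256x1x1 blocks, 128x1x1 threads each, no dynamic LDS, default
    // stream; kernelParams is nullptr because this kernel takes no arguments.
    hipModuleLaunchKernel(kernel, 256, 1, 1, 128, 1, 1, 0, nullptr, nullptr, nullptr);
    hipDeviceSynchronize();

    hipModuleUnload(module);
    return 0;
}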
1
9,150
it would be better to change `kds` into a `std::vector`; then we wouldn't need to free it explicitly
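A sketch of that suggestion against ihipExtLaunchMultiKernelMultiDevice above (a drop-in fragment assuming the enclosing function's declarations, not a standalone program): with std::vector owning the storage, each early return releases it automatically and the explicit free(kds) calls disappear:

std::vector<hipFunction_t> kds(numDevices);
// prepare all kernel descriptors for each device, as before
for (int i = 0; i < numDevices; ++i) {
    const hipLaunchParams& lp = launchParamsList[i];
    if (lp.stream == nullptr) {
        return hipErrorNotInitialized;  // vector cleans itself up, no free()
    }
    kds[i] = ps.kernel_descriptor(reinterpret_cast<std::uintptr_t>(lp.func),
                                  hip_impl::target_agent(lp.stream));
    if (kds[i] == nullptr) {
        return hipErrorInvalidValue;    // likewise
    }
}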
ROCm-Developer-Tools-HIP
cpp
@@ -113,6 +113,8 @@ def send_email_sendgrid(config, sender, subject, message, recipients, image_png) def send_email(subject, message, sender, recipients, image_png=None): + config = configuration.get_config() + subject = _prefix(subject) logger.debug("Emailing:\n" "-------------\n"
1
# Copyright (c) 2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import sys import logging import socket from luigi import configuration logger = logging.getLogger("luigi-interface") DEFAULT_CLIENT_EMAIL = 'luigi-client@%s' % socket.getfqdn() DEBUG = False def email_type(): return configuration.get_config().get('core', 'email-type', 'plain') def generate_email(sender, subject, message, recipients, image_png): import email import email.mime import email.mime.multipart import email.mime.text import email.mime.image msg_root = email.mime.multipart.MIMEMultipart('related') msg_text = email.mime.text.MIMEText(message, email_type()) msg_text.set_charset('utf-8') msg_root.attach(msg_text) if image_png: fp = open(image_png, 'rb') msg_image = email.mime.image.MIMEImage(fp.read(), 'png') fp.close() msg_root.attach(msg_image) msg_root['Subject'] = subject msg_root['From'] = sender msg_root['To'] = ','.join(recipients) return msg_root def wrap_traceback(traceback): if email_type() == 'html': return '<pre>%s</pre>' % traceback return traceback def send_email_smtp(config, sender, subject, message, recipients, image_png): import smtplib smtp_ssl = config.getboolean('core', 'smtp_ssl', False) smtp_host = config.get('core', 'smtp_host', 'localhost') smtp_port = config.getint('core', 'smtp_port', 0) smtp_local_hostname = config.get('core', 'smtp_local_hostname', None) smtp_timeout = config.getfloat('core', 'smtp_timeout', None) kwargs = dict(host=smtp_host, port=smtp_port, local_hostname=smtp_local_hostname) if smtp_timeout: kwargs['timeout'] = smtp_timeout smtp_login = config.get('core', 'smtp_login', None) smtp_password = config.get('core', 'smtp_password', None) smtp = smtplib.SMTP(**kwargs) if not smtp_ssl else smtplib.SMTP_SSL(**kwargs) if smtp_login and smtp_password: smtp.login(smtp_login, smtp_password) msg_root = generate_email(sender, subject, message, recipients, image_png) smtp.sendmail(sender, recipients, msg_root.as_string()) def send_email_ses(config, sender, subject, message, recipients, image_png): import boto.ses con = boto.ses.connect_to_region(config.get('email', 'region', 'us-east-1'), aws_access_key_id=config.get('email', 'AWS_ACCESS_KEY', None), aws_secret_access_key=config.get('email', 'AWS_SECRET_KEY', None)) msg_root = generate_email(sender, subject, message, recipients, image_png) con.send_raw_email(msg_root.as_string(), source=msg_root['From'], destinations=msg_root['To']) def send_email_sendgrid(config, sender, subject, message, recipients, image_png): import sendgrid client = sendgrid.SendGridClient(config.get('email', 'SENDGRID_USERNAME', None), config.get('email', 'SENDGRID_PASSWORD', None), raise_errors=True) to_send = sendgrid.Mail() to_send.add_to(recipients) to_send.set_from(sender) to_send.set_subject(subject) if email_type() == 'html': to_send.set_html(message) else: to_send.set_text(message) if image_png: to_send.add_attachment(image_png) client.send(to_send) def send_email(subject, message, sender, recipients, image_png=None): subject = _prefix(subject) 
logger.debug("Emailing:\n" "-------------\n" "To: %s\n" "From: %s\n" "Subject: %s\n" "Message:\n" "%s\n" "-------------", recipients, sender, subject, message) if not recipients or recipients == (None,): return if sys.stdout.isatty() or DEBUG: logger.info("Not sending email when running from a tty or in debug mode") return config = configuration.get_config() # Clean the recipients lists to allow multiple error-email addresses, comma # separated in client.cfg recipients_tmp = [] for r in recipients: recipients_tmp.extend(r.split(',')) # Replace original recipients with the clean list recipients = recipients_tmp email_sender_type = config.get('email', 'type', None) if email_sender_type == "ses": send_email_ses(config, sender, subject, message, recipients, image_png) elif email_sender_type == "sendgrid": send_email_sendgrid(config, sender, subject, message, recipients, image_png) else: send_email_smtp(config, sender, subject, message, recipients, image_png) def send_error_email(subject, message): """ Sends an email to the configured error-email. If no error-email is configured, then a message is logged. """ config = configuration.get_config() receiver = config.get('core', 'error-email', None) if receiver: sender = config.get('core', 'email-sender', DEFAULT_CLIENT_EMAIL) logger.info("Sending warning email to %r", receiver) send_email( subject=subject, message=message, sender=sender, recipients=(receiver,) ) else: logger.info("Skipping error email. Set `error-email` in the `core` " "section of the luigi config file to receive error " "emails.") def _prefix(subject): """ If the config has a special prefix for emails then this function adds this prefix. """ config = configuration.get_config() email_prefix = config.get('core', 'email-prefix', None) if email_prefix is not None: subject = "%s %s" % (email_prefix, subject) return subject
1
10,742
Looks like you're also changing logic here, not just tests.
spotify-luigi
py
@@ -78,7 +78,7 @@ namespace Datadog.Trace.Tests.Sampling var expectedLimit = totalMilliseconds * actualIntervalLimit / 1_000; - var acceptableVariance = (actualIntervalLimit * 1.0); + var acceptableVariance = (actualIntervalLimit * 1.15); var upperLimit = expectedLimit + acceptableVariance; var lowerLimit = expectedLimit - acceptableVariance;
1
using System; using System.Collections.Concurrent; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using Datadog.Trace.Sampling; using Xunit; namespace Datadog.Trace.Tests.Sampling { [Collection(nameof(Datadog.Trace.Tests.Sampling))] public class RateLimiterTests { private const int DefaultLimitPerSecond = 100; private static readonly ThreadLocal<Random> Random = new ThreadLocal<Random>(() => new Random()); [Fact] public void One_Is_Allowed() { var traceContext = new TraceContext(Tracer.Instance); var spanContext = new SpanContext(null, traceContext, "Weeeee"); var span = new Span(spanContext, null); var rateLimiter = new RateLimiter(maxTracesPerInterval: null); var allowed = rateLimiter.Allowed(span); Assert.True(allowed); } [Fact] public void All_Traces_Disabled() { var rateLimiter = new RateLimiter(maxTracesPerInterval: 0); var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500); Assert.Equal(expected: 0, actual: allowedCount); } [Fact] public void All_Traces_Allowed() { var rateLimiter = new RateLimiter(maxTracesPerInterval: -1); var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500); Assert.Equal(expected: 500, actual: allowedCount); } [Fact] public void Only_100_Allowed_In_500_Burst_For_Default() { var rateLimiter = new RateLimiter(maxTracesPerInterval: null); var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500); Assert.Equal(expected: DefaultLimitPerSecond, actual: allowedCount); } [Fact] public void Limits_Approximately_To_Defaults() { Run_Limit_Test(intervalLimit: null, numberPerBurst: 200, numberOfBursts: 18, millisecondsBetweenBursts: 247); } [Fact] public void Limits_To_Custom_Amount_Per_Second() { Run_Limit_Test(intervalLimit: 500, numberPerBurst: 200, numberOfBursts: 18, millisecondsBetweenBursts: 247); } private static void Run_Limit_Test(int? intervalLimit, int numberPerBurst, int numberOfBursts, int millisecondsBetweenBursts) { var actualIntervalLimit = intervalLimit ?? 
DefaultLimitPerSecond; var test = new RateLimitLoadTest() { NumberPerBurst = numberPerBurst, TimeBetweenBursts = TimeSpan.FromMilliseconds(millisecondsBetweenBursts), NumberOfBursts = numberOfBursts }; var result = RunTest(intervalLimit, test); var totalMilliseconds = result.TimeElapsed.TotalMilliseconds; var expectedLimit = totalMilliseconds * actualIntervalLimit / 1_000; var acceptableVariance = (actualIntervalLimit * 1.0); var upperLimit = expectedLimit + acceptableVariance; var lowerLimit = expectedLimit - acceptableVariance; Assert.True( result.TotalAllowed >= lowerLimit && result.TotalAllowed <= upperLimit, $"Expected between {lowerLimit} and {upperLimit}, received {result.TotalAllowed} out of {result.TotalAttempted} within {totalMilliseconds} milliseconds."); // Rate should match for the last two intervals, which is a total of two seconds var numberOfBurstsWithinTwoIntervals = 2_000 / millisecondsBetweenBursts; var totalExpectedSent = numberOfBurstsWithinTwoIntervals * numberPerBurst; var totalExpectedAllowed = 2 * actualIntervalLimit; var expectedRate = totalExpectedAllowed / (float)totalExpectedSent; var lowestRate = expectedRate - 0.40f; if (lowestRate < 0) { lowestRate = expectedRate / 2; } var highestRate = expectedRate + 0.40f; Assert.True( result.ReportedRate >= lowestRate && result.ReportedRate <= highestRate, $"Expected rate between {lowestRate} and {highestRate}, received {result.ReportedRate}."); } private static int AskTheRateLimiterABunchOfTimes(RateLimiter rateLimiter, int howManyTimes) { var traceContext = new TraceContext(Tracer.Instance); var spanContext = new SpanContext(null, traceContext, "Weeeee"); var span = new Span(spanContext, null); var remaining = howManyTimes; var allowedCount = 0; while (remaining-- > 0) { var allowed = rateLimiter.Allowed(span); if (allowed) { allowedCount++; } } return allowedCount; } private static RateLimitResult RunTest(int? 
intervalLimit, RateLimitLoadTest test) { var parallelism = test.NumberPerBurst; if (parallelism > 10) { parallelism = 10; } var result = new RateLimitResult(); var limiter = new RateLimiter(maxTracesPerInterval: intervalLimit); var traceContext = new TraceContext(Tracer.Instance); var barrier = new Barrier(parallelism + 1); var numberPerThread = test.NumberPerBurst / parallelism; var workers = new Task[parallelism]; for (int i = 0; i < workers.Length; i++) { workers[i] = Task.Factory.StartNew( () => { var stopwatch = new Stopwatch(); for (var i = 0; i < test.NumberOfBursts; i++) { // Wait for every worker to be ready for next burst barrier.SignalAndWait(); stopwatch.Restart(); for (int j = 0; j < numberPerThread; j++) { var spanContext = new SpanContext(null, traceContext, "Weeeee"); var span = new Span(spanContext, null); if (limiter.Allowed(span)) { result.Allowed.Add(span.SpanId); } else { result.Denied.Add(span.SpanId); } } var remainingTime = (test.TimeBetweenBursts - stopwatch.Elapsed).TotalMilliseconds; if (remainingTime > 0) { Thread.Sleep((int)remainingTime); } } }, TaskCreationOptions.LongRunning); } // Wait for all workers to be ready barrier.SignalAndWait(); var sw = Stopwatch.StartNew(); // We do not need to synchronize with workers anymore barrier.RemoveParticipant(); // Wait for workers to finish Task.WaitAll(workers); result.TimeElapsed = sw.Elapsed; result.RateLimiter = limiter; result.ReportedRate = limiter.GetEffectiveRate(); return result; } private class RateLimitLoadTest { public int NumberPerBurst { get; set; } public TimeSpan TimeBetweenBursts { get; set; } public int NumberOfBursts { get; set; } } private class RateLimitResult { public RateLimiter RateLimiter { get; set; } public TimeSpan TimeElapsed { get; set; } public ConcurrentBag<ulong> Allowed { get; } = new ConcurrentBag<ulong>(); public ConcurrentBag<ulong> Denied { get; } = new ConcurrentBag<ulong>(); public float ReportedRate { get; set; } public int TotalAttempted => Allowed.Count + Denied.Count; public int TotalAllowed => Allowed.Count; } } }
1
17,586
Is it possible that the test failures here are real and that we should improve the rate-limiting logic? Or are we OK with the rate limit being exceeded by 15%?
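For scale, a quick sketch of the bounds arithmetic in that test with the patched 1.15 factor (generic C++, not the dd-trace-dotnet code; timing assumed ideal):

#include <cstdio>

int main() {
    const double intervalLimit = 100.0;             // default traces per second
    const double bursts = 18.0, msBetween = 247.0;  // test parameters
    const double totalMs = bursts * msBetween;      // ~4446 ms, ignoring jitter

    const double expected = totalMs * intervalLimit / 1000.0;  // ~444.6 allowed
    const double variance = intervalLimit * 1.15;              // tolerance: 115
    std::printf("accept between %.1f and %.1f allowed traces\n",
                expected - variance, expected + variance);
    return 0;
}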
DataDog-dd-trace-dotnet
.cs
@@ -71,6 +71,8 @@ class ImageProvider extends FileProvider throw new \LogicException("The 'srcset' and 'picture' options must not be used simultaneously."); } + $attrPrefix = isset($options['lazy']) && true === $options['lazy'] ? 'data-' : ''; + if (MediaProviderInterface::FORMAT_REFERENCE === $format) { $box = $media->getBox(); } else {
1
<?php declare(strict_types=1); /* * This file is part of the Sonata Project package. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Provider; use Gaufrette\Filesystem; use Imagine\Image\ImagineInterface; use Sonata\MediaBundle\CDN\CDNInterface; use Sonata\MediaBundle\Generator\GeneratorInterface; use Sonata\MediaBundle\Metadata\MetadataBuilderInterface; use Sonata\MediaBundle\Model\MediaInterface; use Sonata\MediaBundle\Thumbnail\ThumbnailInterface; use Symfony\Component\HttpFoundation\File\File; use Symfony\Component\HttpFoundation\File\UploadedFile; class ImageProvider extends FileProvider { /** * @var ImagineInterface */ protected $imagineAdapter; /** * @param string $name * @param Filesystem $filesystem * @param CDNInterface $cdn * @param GeneratorInterface $pathGenerator * @param ThumbnailInterface $thumbnail * @param array $allowedExtensions * @param array $allowedMimeTypes * @param ImagineInterface $adapter * @param MetadataBuilderInterface $metadata */ public function __construct($name, Filesystem $filesystem, CDNInterface $cdn, GeneratorInterface $pathGenerator, ThumbnailInterface $thumbnail, array $allowedExtensions, array $allowedMimeTypes, ImagineInterface $adapter, MetadataBuilderInterface $metadata = null) { parent::__construct($name, $filesystem, $cdn, $pathGenerator, $thumbnail, $allowedExtensions, $allowedMimeTypes, $metadata); $this->imagineAdapter = $adapter; } /** * {@inheritdoc} */ public function getProviderMetadata() { return new Metadata( $this->getName(), $this->getName().'.description', null, 'SonataMediaBundle', ['class' => 'fa fa-picture-o'] ); } /** * {@inheritdoc} */ public function getHelperProperties(MediaInterface $media, $format, $options = []) { if (isset($options['srcset'], $options['picture'])) { throw new \LogicException("The 'srcset' and 'picture' options must not be used simultaneously."); } if (MediaProviderInterface::FORMAT_REFERENCE === $format) { $box = $media->getBox(); } else { $resizerFormat = $this->getFormat($format); if (false === $resizerFormat) { throw new \RuntimeException(sprintf('The image format "%s" is not defined. Is the format registered in your ``sonata_media`` configuration?', $format)); } $box = $this->resizer->getBox($media, $resizerFormat); } $mediaWidth = $box->getWidth(); $params = [ 'alt' => $media->getName(), 'title' => $media->getName(), 'src' => $this->generatePublicUrl($media, $format), 'width' => $mediaWidth, 'height' => $box->getHeight(), ]; if (isset($options['picture'])) { $pictureParams = []; foreach ($options['picture'] as $key => $pictureFormat) { $formatName = $this->getFormatName($media, $pictureFormat); $settings = $this->getFormat($formatName); $src = $this->generatePublicUrl($media, $formatName); $mediaQuery = \is_string($key) ? 
$key : sprintf('(max-width: %dpx)', $this->resizer->getBox($media, $settings)->getWidth()); $pictureParams['source'][] = ['media' => $mediaQuery, 'srcset' => $src]; } unset($options['picture']); $pictureParams['img'] = $params + $options; $params = ['picture' => $pictureParams]; } elseif (MediaProviderInterface::FORMAT_ADMIN !== $format) { $srcSetFormats = $this->getFormats(); if (isset($options['srcset']) && \is_array($options['srcset'])) { $srcSetFormats = []; foreach ($options['srcset'] as $srcSetFormat) { $formatName = $this->getFormatName($media, $srcSetFormat); $srcSetFormats[$formatName] = $this->getFormat($formatName); } unset($options['srcset']); // Make sure the requested format is also in the srcSetFormats if (!isset($srcSetFormats[$format])) { $srcSetFormats[$format] = $this->getFormat($format); } } if (!isset($options['srcset'])) { $srcSet = []; foreach ($srcSetFormats as $providerFormat => $settings) { // Check if format belongs to the current media's context if (0 === strpos($providerFormat, $media->getContext())) { $width = $this->resizer->getBox($media, $settings)->getWidth(); $srcSet[] = sprintf('%s %dw', $this->generatePublicUrl($media, $providerFormat), $width); } } // The reference format is not in the formats list $srcSet[] = sprintf( '%s %dw', $this->generatePublicUrl($media, MediaProviderInterface::FORMAT_REFERENCE), $media->getBox()->getWidth() ); $params['srcset'] = implode(', ', $srcSet); } $params['sizes'] = sprintf('(max-width: %1$dpx) 100vw, %1$dpx', $mediaWidth); } return array_merge($params, $options); } /** * {@inheritdoc} */ public function updateMetadata(MediaInterface $media, $force = true) { try { if (!$media->getBinaryContent() instanceof \SplFileInfo) { // this is now optimized at all!!! $path = tempnam(sys_get_temp_dir(), 'sonata_update_metadata'); $fileObject = new \SplFileObject($path, 'w'); $fileObject->fwrite($this->getReferenceFile($media)->getContent()); } else { $fileObject = $media->getBinaryContent(); } $image = $this->imagineAdapter->open($fileObject->getPathname()); $size = $image->getSize(); $media->setSize($fileObject->getSize()); $media->setWidth($size->getWidth()); $media->setHeight($size->getHeight()); } catch (\LogicException $e) { $media->setProviderStatus(MediaInterface::STATUS_ERROR); $media->setSize(0); $media->setWidth(0); $media->setHeight(0); } } /** * {@inheritdoc} */ public function generatePublicUrl(MediaInterface $media, $format) { if (MediaProviderInterface::FORMAT_REFERENCE === $format) { $path = $this->getReferenceImage($media); } else { $path = $this->thumbnail->generatePublicUrl($this, $media, $format); } // if $path is already an url, no further action is required if (null !== parse_url($path, PHP_URL_SCHEME)) { return $path; } return $this->getCdn()->getPath($path, $media->getCdnIsFlushable()); } /** * {@inheritdoc} */ public function generatePrivateUrl(MediaInterface $media, $format) { return $this->thumbnail->generatePrivateUrl($this, $media, $format); } /** * {@inheritdoc} */ protected function doTransform(MediaInterface $media) { parent::doTransform($media); if ($media->getBinaryContent() instanceof UploadedFile) { $fileName = $media->getBinaryContent()->getClientOriginalName(); } elseif ($media->getBinaryContent() instanceof File) { $fileName = $media->getBinaryContent()->getFilename(); } else { // Should not happen, FileProvider should throw an exception in that case return; } if (!\in_array(strtolower(pathinfo($fileName, PATHINFO_EXTENSION)), $this->allowedExtensions, true) || 
!\in_array($media->getBinaryContent()->getMimeType(), $this->allowedMimeTypes, true)) { return; } try { $image = $this->imagineAdapter->open($media->getBinaryContent()->getPathname()); } catch (\RuntimeException $e) { $media->setProviderStatus(MediaInterface::STATUS_ERROR); return; } $size = $image->getSize(); $media->setWidth($size->getWidth()); $media->setHeight($size->getHeight()); $media->setProviderStatus(MediaInterface::STATUS_OK); } }
1
10,617
Shouldn't there be some kind of validation somewhere? IMO there should be an exception if `$options['lazy']` is not a boolean.
sonata-project-SonataMediaBundle
php
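A minimal sketch of the validation suggested above, assuming it would sit next to the existing `srcset`/`picture` guard in `getHelperProperties()` (hypothetical code, not part of the PR):

```php
if (isset($options['lazy']) && !\is_bool($options['lazy'])) {
    throw new \InvalidArgumentException(sprintf(
        "The 'lazy' option must be a boolean, %s given.",
        \gettype($options['lazy'])
    ));
}

// The prefix computation from the diff then no longer needs the `=== true` check.
$attrPrefix = ($options['lazy'] ?? false) ? 'data-' : '';
```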
@@ -1,5 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. +using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging; + namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer { using System;
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer { using System; using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.TestPlatform.VsTestConsole.TranslationLayer.Interfaces; using Microsoft.TestPlatform.VsTestConsole.TranslationLayer.Payloads; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Interfaces; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client.Interfaces; /// <summary> /// VstestConsoleRequestSender for sending requests to Vstest.console.exe /// </summary> internal class VsTestConsoleRequestSender : ITranslationLayerRequestSender { private ICommunicationManager communicationManager; private IDataSerializer dataSerializer; private ManualResetEvent handShakeComplete = new ManualResetEvent(false); private bool handShakeSuccessful = false; #region Constructor public VsTestConsoleRequestSender() : this(new SocketCommunicationManager(), JsonDataSerializer.Instance) { } internal VsTestConsoleRequestSender(ICommunicationManager communicationManager, IDataSerializer dataSerializer) { this.communicationManager = communicationManager; this.dataSerializer = dataSerializer; } #endregion #region ITranslationLayerRequestSender /// <summary> /// Initializes Communication with vstest.console.exe /// Hosts a communication channel and asynchronously connects to vstest.console.exe /// </summary> /// <returns>Port Number of hosted server on this side</returns> public int InitializeCommunication() { this.handShakeSuccessful = false; this.handShakeComplete.Reset(); int port = -1; try { port = this.communicationManager.HostServer(); this.communicationManager.AcceptClientAsync(); Task.Run(() => { this.communicationManager.WaitForClientConnection(Timeout.Infinite); handShakeSuccessful = HandShakeWithVsTestConsole(); this.handShakeComplete.Set(); }); } catch (Exception ex) { EqtTrace.Error("VsTestConsoleRequestSender: Error initializing communication with VstestConsole: {0}", ex); this.handShakeComplete.Set(); } return port; } /// <summary> /// Waits for Vstest.console.exe Connection for a given timeout /// </summary> /// <param name="clientConnectionTimeout">Time to wait for the connection</param> /// <returns>True, if successful</returns> public bool WaitForRequestHandlerConnection(int clientConnectionTimeout) { var waitSucess = this.handShakeComplete.WaitOne(clientConnectionTimeout); return waitSucess && handShakeSuccessful; } /// <summary> /// Initializes the Extensions while probing additional extension paths /// </summary> /// <param name="pathToAdditionalExtensions">Paths to check for additional extensions</param> public void InitializeExtensions(IEnumerable<string> pathToAdditionalExtensions) { this.communicationManager.SendMessage(MessageType.ExtensionsInitialize, pathToAdditionalExtensions); } /// <summary> /// Discover Tests using criteria and send events through eventHandler /// </summary> /// <param name="discoveryCriteria"></param> /// <param name="eventHandler"></param> public void DiscoverTests(IEnumerable<string> sources, string runSettings, ITestDiscoveryEventsHandler eventHandler) { this.communicationManager.SendMessage(MessageType.StartDiscovery, new 
DiscoveryRequestPayload() { Sources = sources, RunSettings = runSettings }); this.ListenAndReportTestCases(eventHandler); } /// <summary> /// Starts the TestRun with given sources and criteria /// </summary> /// <param name="sources">Sources for test run</param> /// <param name="runSettings">RunSettings for test run</param> /// <param name="runEventsHandler">EventHandler for test run events</param> public void StartTestRun(IEnumerable<string> sources, string runSettings, ITestRunEventsHandler runEventsHandler) { this.communicationManager.SendMessage(MessageType.TestRunAllSourcesWithDefaultHost, new TestRunRequestPayload() { Sources = sources.ToList(), RunSettings = runSettings }); ListenAndReportTestResults(runEventsHandler, null); } /// <summary> /// Starts the TestRun with given test cases and criteria /// </summary> /// <param name="testCases">TestCases to run</param> /// <param name="runSettings">RunSettings for test run</param> /// <param name="runEventsHandler">EventHandler for test run events</param> public void StartTestRun(IEnumerable<TestCase> testCases, string runSettings, ITestRunEventsHandler runEventsHandler) { this.communicationManager.SendMessage(MessageType.TestRunSelectedTestCasesDefaultHost, new TestRunRequestPayload() { TestCases = testCases.ToList(), RunSettings = runSettings }); ListenAndReportTestResults(runEventsHandler, null); } /// <summary> /// Starts the TestRun with given sources and criteria with custom test host /// </summary> /// <param name="sources">Sources for test run</param> /// <param name="runSettings">RunSettings for test run</param> /// <param name="runEventsHandler">EventHandler for test run events</param> public void StartTestRunWithCustomHost(IEnumerable<string> sources, string runSettings, ITestRunEventsHandler runEventsHandler, ITestHostLauncher customHostLauncher) { this.communicationManager.SendMessage(MessageType.GetTestRunnerProcessStartInfoForRunAll, new TestRunRequestPayload() { Sources = sources.ToList(), RunSettings = runSettings, DebuggingEnabled = customHostLauncher.IsDebug }); ListenAndReportTestResults(runEventsHandler, customHostLauncher); } /// <summary> /// Starts the TestRun with given test cases and criteria with custom test host /// </summary> /// <param name="testCases">TestCases to run</param> /// <param name="runSettings">RunSettings for test run</param> /// <param name="runEventsHandler">EventHandler for test run events</param> public void StartTestRunWithCustomHost(IEnumerable<TestCase> testCases, string runSettings, ITestRunEventsHandler runEventsHandler, ITestHostLauncher customHostLauncher) { this.communicationManager.SendMessage(MessageType.GetTestRunnerProcessStartInfoForRunSelected, new TestRunRequestPayload() { TestCases = testCases.ToList(), RunSettings = runSettings, DebuggingEnabled = customHostLauncher.IsDebug }); ListenAndReportTestResults(runEventsHandler, customHostLauncher); } /// <summary> /// Send Cancel TestRun message /// </summary> public void CancelTestRun() { this.communicationManager.SendMessage(MessageType.CancelTestRun); } /// <summary> /// Send Abort TestRun message /// </summary> public void AbortTestRun() { this.communicationManager.SendMessage(MessageType.AbortTestRun); } public void Close() { this.Dispose(); } public void EndSession() { this.communicationManager.SendMessage(MessageType.SessionEnd); } public void Dispose() { this.communicationManager?.StopServer(); } #endregion private bool HandShakeWithVsTestConsole() { var success = false; var message = 
this.communicationManager.ReceiveMessage(); if (message.MessageType == MessageType.SessionConnected) { this.communicationManager.SendMessage(MessageType.VersionCheck); message = this.communicationManager.ReceiveMessage(); if (message.MessageType == MessageType.VersionCheck) { var testPlatformVersion = this.dataSerializer.DeserializePayload<int>(message); success = testPlatformVersion == 1; if (!success) { EqtTrace.Error("VsTestConsoleRequestSender: VersionCheck Failed. TestPlatform Version: {0}", testPlatformVersion); } } else { EqtTrace.Error("VsTestConsoleRequestSender: VersionCheck Message Expected but different message received: Received MessageType: {0}", message.MessageType); } } else { EqtTrace.Error("VsTestConsoleRequestSender: SessionConnected Message Expected but different message received: Received MessageType: {0}", message.MessageType); } return success; } private void ListenAndReportTestCases(ITestDiscoveryEventsHandler eventHandler) { var isDiscoveryComplete = false; // Cycle through the messages that the vstest.console sends. // Currently each of the operations are not separate tasks since they should not each take much time. // This is just a notification. while (!isDiscoveryComplete) { try { var message = this.communicationManager.ReceiveMessage(); if (string.Equals(MessageType.TestCasesFound, message.MessageType)) { var testCases = this.dataSerializer.DeserializePayload<IEnumerable<TestCase>>(message); eventHandler.HandleDiscoveredTests(testCases); } else if (string.Equals(MessageType.DiscoveryComplete, message.MessageType)) { var discoveryCompletePayload = this.dataSerializer.DeserializePayload<DiscoveryCompletePayload>(message); eventHandler.HandleDiscoveryComplete(discoveryCompletePayload.TotalTests, discoveryCompletePayload.LastDiscoveredTests, discoveryCompletePayload.IsAborted); isDiscoveryComplete = true; } else if (string.Equals(MessageType.TestMessage, message.MessageType)) { var testMessagePayload = this.dataSerializer.DeserializePayload<TestMessagePayload>(message); eventHandler.HandleLogMessage(testMessagePayload.MessageLevel, testMessagePayload.Message); } } catch (Exception ex) { EqtTrace.Error("VsTestConsoleRequestSender: TestDiscovery: Message Deserialization failed with {0}", ex); // Notify of a discovery complete and bail out. eventHandler.HandleDiscoveryComplete(0, null, false); isDiscoveryComplete = true; } } } private void ListenAndReportTestResults(ITestRunEventsHandler eventHandler, ITestHostLauncher customHostLauncher) { var isTestRunComplete = false; // Cycle through the messages that the testhost sends. // Currently each of the operations are not separate tasks since they should not each take much time. This is just a notification. 
while (!isTestRunComplete) { try { var message = this.communicationManager.ReceiveMessage(); if (string.Equals(MessageType.TestRunStatsChange, message.MessageType)) { var testRunChangedArgs = this.dataSerializer.DeserializePayload<TestRunChangedEventArgs>(message); eventHandler.HandleTestRunStatsChange(testRunChangedArgs); } else if (string.Equals(MessageType.ExecutionComplete, message.MessageType)) { var testRunCompletePayload = this.dataSerializer.DeserializePayload<TestRunCompletePayload>(message); eventHandler.HandleTestRunComplete( testRunCompletePayload.TestRunCompleteArgs, testRunCompletePayload.LastRunTests, testRunCompletePayload.RunAttachments, testRunCompletePayload.ExecutorUris); isTestRunComplete = true; } else if (string.Equals(MessageType.TestMessage, message.MessageType)) { var testMessagePayload = this.dataSerializer.DeserializePayload<TestMessagePayload>(message); eventHandler.HandleLogMessage(testMessagePayload.MessageLevel, testMessagePayload.Message); } else if (string.Equals(MessageType.CustomTestHostLaunch, message.MessageType)) { var testProcessStartInfo = this.dataSerializer.DeserializePayload<TestProcessStartInfo>(message); int processId = (customHostLauncher != null) ? customHostLauncher.LaunchTestHost(testProcessStartInfo) : -1; this.communicationManager.SendMessage(MessageType.CustomTestHostLaunchCallback, processId); } } catch (Exception exception) { EqtTrace.Error("VsTestConsoleRequestSender: TestExecution: Error Processing Request from DesignModeClient: {0}", exception); // notify of a test run complete and bail out. eventHandler.HandleTestRunComplete(new TestRunCompleteEventArgs(null, false, true, exception, null, TimeSpan.MinValue), null, null, null); isTestRunComplete = true; } } } } }
1
11,166
nit: please move it inside the namespace.
microsoft-vstest
.cs
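The nit refers to the file's existing convention of keeping `using` directives inside the namespace block; the moved directive would look like this (sketch):

```csharp
namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer
{
    using System;
    // ... other existing usings ...
    using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging;
}
```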
@@ -15,7 +15,7 @@ module Bolt shell-command tmpdir tty - ].freeze + ].concat(RUN_AS_OPTIONS).sort.freeze DEFAULTS = { 'cleanup' => true
1
# frozen_string_literal: true require 'bolt/error' require 'bolt/config/transport/base' module Bolt class Config module Transport class Docker < Base OPTIONS = %w[ cleanup host interpreters service-url shell-command tmpdir tty ].freeze DEFAULTS = { 'cleanup' => true }.freeze private def validate super if @config['interpreters'] @config['interpreters'] = normalize_interpreters(@config['interpreters']) end end end end end end
1
18,457
The inventory schema needs to be regenerated to include these options. Looks like the CI job didn't get triggered since the paths don't include `lib/bolt/transport/**`.
puppetlabs-bolt
rb
@@ -0,0 +1,14 @@ +WELCOME_DIALOG_TEXT = ( + "Welcome to NVDA dialog Welcome to NVDA!\n" + "Most commands for controlling NVDA require you to hold down the NVDA key while pressing other keys.\n" + "By default, the numpad Insert and main Insert keys may both be used as the NVDA key.\n" + "You can also configure NVDA to use the CapsLock as the NVDA key.\n" + "Press NVDA+n at any time to activate the NVDA menu.\n" + "From this menu, you can configure NVDA, get help and access other NVDA functions.\n" + "Options grouping\n" + "Keyboard layout: combo box desktop collapsed Alt+k" +) +QUIT_DIALOG_TEXT = ( + "Exit NVDA dialog\n" + "What would you like to do? combo box Exit collapsed Alt+d" +)
1
1
22,730
This might break if a user runs the system tests with a system language other than English, in which case the default dialog text will differ.
nvaccess-nvda
py
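One way to express the concern as a guard. This is a hypothetical sketch, not part of the PR, and it assumes the tests support `unittest`-style skipping:

```python
import locale
import unittest

# The dialog constants above are the English renderings, so comparisons
# against them only make sense on English-language systems.
lang = locale.getdefaultlocale()[0] or ""
if not lang.startswith("en"):
    raise unittest.SkipTest("dialog text constants are English-only")
```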
@@ -3,7 +3,7 @@ require 'rspec/expectations' RSpec::Matchers.define :have_errors do |expected| match do - actual.body.match(/Error/) + actual.body.match(/Error\:/) end failure_message do |actual|
1
require 'rspec/expectations' RSpec::Matchers.define :have_errors do |expected| match do actual.body.match(/Error/) end failure_message do |actual| "expected would have errors on the page." end failure_message_when_negated do |actual| "expected would not have errors on the page." end end
1
18,115
This was raising intermittent errors, since the Lorem ipsum filler text contains the word "error".
DMPRoadmap-roadmap
rb
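For reference, the tightened matcher from the diff; the backslash before `:` in the committed regex is redundant in Ruby, so an equivalent sketch is:

```ruby
RSpec::Matchers.define :have_errors do |expected|
  match do
    # Anchoring on the colon avoids matching the word "error" inside
    # Lorem-ipsum filler text rendered on the page.
    actual.body.match(/Error:/)
  end
end
```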
@@ -214,6 +214,7 @@ bool RTPSDomain::removeRTPSParticipant( { if (p != nullptr) { + assert((p->mp_impl != nullptr) && "This participant has been previously invalidated"); p->mp_impl->disable(); std::unique_lock<std::mutex> lock(m_mutex);
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * @file RTPSDomain.cpp */ #include <fastdds/rtps/RTPSDomain.h> #include <chrono> #include <thread> #include <cstdlib> #include <regex> #include <fastdds/dds/log/Log.hpp> #include <fastdds/rtps/history/WriterHistory.h> #include <fastdds/rtps/participant/RTPSParticipant.h> #include <fastdds/rtps/reader/RTPSReader.h> #include <fastdds/rtps/writer/RTPSWriter.h> #include <rtps/transport/UDPv4Transport.h> #include <rtps/transport/UDPv6Transport.h> #include <rtps/transport/test_UDPv4Transport.h> #include <fastrtps/utils/IPFinder.h> #include <fastrtps/utils/IPLocator.h> #include <fastrtps/utils/System.h> #include <fastrtps/utils/md5.h> #include <fastrtps/xmlparser/XMLProfileManager.h> #include <rtps/RTPSDomainImpl.hpp> #include <rtps/participant/RTPSParticipantImpl.h> #include <rtps/common/GuidUtils.hpp> #include <utils/Host.hpp> namespace eprosima { namespace fastrtps { namespace rtps { static void guid_prefix_create( uint32_t ID, GuidPrefix_t& guidP) { eprosima::fastdds::rtps::GuidUtils::instance().guid_prefix_create(ID, guidP); } std::mutex RTPSDomain::m_mutex; std::atomic<uint32_t> RTPSDomain::m_maxRTPSParticipantID(1); std::vector<RTPSDomain::t_p_RTPSParticipant> RTPSDomain::m_RTPSParticipants; std::set<uint32_t> RTPSDomain::m_RTPSParticipantIDs; void RTPSDomain::stopAll() { std::unique_lock<std::mutex> lock(m_mutex); logInfo(RTPS_PARTICIPANT, "DELETING ALL ENDPOINTS IN THIS DOMAIN"); while (m_RTPSParticipants.size() > 0) { RTPSDomain::t_p_RTPSParticipant participant = m_RTPSParticipants.back(); m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID())); m_RTPSParticipants.pop_back(); lock.unlock(); RTPSDomain::removeRTPSParticipant_nts(participant); lock.lock(); } logInfo(RTPS_PARTICIPANT, "RTPSParticipants deleted correctly "); std::this_thread::sleep_for(std::chrono::milliseconds(100)); } RTPSParticipant* RTPSDomain::createParticipant( uint32_t domain_id, const RTPSParticipantAttributes& attrs, RTPSParticipantListener* listen) { return createParticipant(domain_id, true, attrs, listen); } RTPSParticipant* RTPSDomain::createParticipant( uint32_t domain_id, bool enabled, const RTPSParticipantAttributes& attrs, RTPSParticipantListener* listen) { logInfo(RTPS_PARTICIPANT, ""); RTPSParticipantAttributes PParam = attrs; if (PParam.builtin.discovery_config.leaseDuration < c_TimeInfinite && PParam.builtin.discovery_config.leaseDuration <= PParam.builtin.discovery_config.leaseDuration_announcementperiod) { logError(RTPS_PARTICIPANT, "RTPSParticipant Attributes: LeaseDuration should be >= leaseDuration announcement period"); return nullptr; } uint32_t ID; { std::lock_guard<std::mutex> guard(m_mutex); if (PParam.participantID < 0) { ID = getNewId(); while (m_RTPSParticipantIDs.insert(ID).second == false) { ID = getNewId(); } } else { ID = PParam.participantID; if (m_RTPSParticipantIDs.insert(ID).second == false) { logError(RTPS_PARTICIPANT, 
"RTPSParticipant with the same ID already exists"); return nullptr; } } } if (!PParam.defaultUnicastLocatorList.isValid()) { logError(RTPS_PARTICIPANT, "Default Unicast Locator List contains invalid Locator"); return nullptr; } if (!PParam.defaultMulticastLocatorList.isValid()) { logError(RTPS_PARTICIPANT, "Default Multicast Locator List contains invalid Locator"); return nullptr; } PParam.participantID = ID; LocatorList_t loc; IPFinder::getIP4Address(&loc); // Generate a new GuidPrefix_t GuidPrefix_t guidP; guid_prefix_create(ID, guidP); RTPSParticipant* p = new RTPSParticipant(nullptr); RTPSParticipantImpl* pimpl = nullptr; // If we force the participant to have a specific prefix we must define a different persistence GuidPrefix_t that // would ensure builtin endpoints are able to differentiate between a communication loss and a participant recovery if (PParam.prefix != c_GuidPrefix_Unknown) { pimpl = new RTPSParticipantImpl(domain_id, PParam, PParam.prefix, guidP, p, listen); } else { pimpl = new RTPSParticipantImpl(domain_id, PParam, guidP, p, listen); } // Above constructors create the sender resources. If a given listening port cannot be allocated an iterative // mechanism will allocate another by default. Change the default listening port is unacceptable for server // discovery. if ((PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::SERVER || PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::BACKUP) && pimpl->did_mutation_took_place_on_meta( PParam.builtin.metatrafficMulticastLocatorList, PParam.builtin.metatrafficUnicastLocatorList)) { // we do not log an error because the library may use participant creation as a trial for server existence logInfo(RTPS_PARTICIPANT, "Server wasn't able to allocate the specified listening port"); delete pimpl; return nullptr; } // Check there is at least one transport registered. 
if (!pimpl->networkFactoryHasRegisteredTransports()) { logError(RTPS_PARTICIPANT, "Cannot create participant, because there is any transport"); delete pimpl; return nullptr; } #if HAVE_SECURITY // Check security was correctly initialized if (!pimpl->is_security_initialized()) { logError(RTPS_PARTICIPANT, "Cannot create participant due to security initialization error"); delete pimpl; return nullptr; } #endif // if HAVE_SECURITY { std::lock_guard<std::mutex> guard(m_mutex); m_RTPSParticipants.push_back(t_p_RTPSParticipant(p, pimpl)); } if (enabled) { // Start protocols pimpl->enable(); } return p; } bool RTPSDomain::removeRTPSParticipant( RTPSParticipant* p) { if (p != nullptr) { p->mp_impl->disable(); std::unique_lock<std::mutex> lock(m_mutex); for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it) { if (it->second->getGuid().guidPrefix == p->getGuid().guidPrefix) { RTPSDomain::t_p_RTPSParticipant participant = *it; m_RTPSParticipants.erase(it); m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID())); lock.unlock(); removeRTPSParticipant_nts(participant); return true; } } } logError(RTPS_PARTICIPANT, "RTPSParticipant not valid or not recognized"); return false; } void RTPSDomain::removeRTPSParticipant_nts( RTPSDomain::t_p_RTPSParticipant& participant) { delete(participant.second); } RTPSWriter* RTPSDomain::createRTPSWriter( RTPSParticipant* p, WriterAttributes& watt, WriterHistory* hist, WriterListener* listen) { RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid()); if (impl) { RTPSWriter* ret_val = nullptr; if (impl->createWriter(&ret_val, watt, hist, listen)) { return ret_val; } } return nullptr; } RTPSWriter* RTPSDomain::createRTPSWriter( RTPSParticipant* p, WriterAttributes& watt, const std::shared_ptr<IPayloadPool>& payload_pool, WriterHistory* hist, WriterListener* listen) { RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid()); if (impl) { RTPSWriter* ret_val = nullptr; if (impl->createWriter(&ret_val, watt, payload_pool, hist, listen)) { return ret_val; } } return nullptr; } RTPSWriter* RTPSDomain::createRTPSWriter( RTPSParticipant* p, const EntityId_t& entity_id, WriterAttributes& watt, const std::shared_ptr<IPayloadPool>& payload_pool, WriterHistory* hist, WriterListener* listen) { RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid()); if (impl) { RTPSWriter* ret_val = nullptr; if (impl->createWriter(&ret_val, watt, payload_pool, hist, listen, entity_id)) { return ret_val; } } return nullptr; } bool RTPSDomain::removeRTPSWriter( RTPSWriter* writer) { if (writer != nullptr) { std::unique_lock<std::mutex> lock(m_mutex); for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it) { if (it->first->getGuid().guidPrefix == writer->getGuid().guidPrefix) { t_p_RTPSParticipant participant = *it; lock.unlock(); return participant.second->deleteUserEndpoint((Endpoint*)writer); } } } return false; } RTPSReader* RTPSDomain::createRTPSReader( RTPSParticipant* p, ReaderAttributes& ratt, ReaderHistory* rhist, ReaderListener* rlisten) { RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid()); if (impl) { RTPSReader* reader; if (impl->createReader(&reader, ratt, rhist, rlisten)) { return reader; } } return nullptr; } RTPSReader* RTPSDomain::createRTPSReader( RTPSParticipant* p, ReaderAttributes& ratt, const std::shared_ptr<IPayloadPool>& payload_pool, ReaderHistory* rhist, ReaderListener* rlisten) { 
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid()); if (impl) { RTPSReader* reader; if (impl->createReader(&reader, ratt, payload_pool, rhist, rlisten)) { return reader; } } return nullptr; } bool RTPSDomain::removeRTPSReader( RTPSReader* reader) { if (reader != nullptr) { std::unique_lock<std::mutex> lock(m_mutex); for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it) { if (it->first->getGuid().guidPrefix == reader->getGuid().guidPrefix) { t_p_RTPSParticipant participant = *it; lock.unlock(); return participant.second->deleteUserEndpoint((Endpoint*)reader); } } } return false; } RTPSParticipant* RTPSDomain::clientServerEnvironmentCreationOverride( uint32_t domain_id, bool enabled, const RTPSParticipantAttributes& att, RTPSParticipantListener* listen /*= nullptr*/) { // Check the specified discovery protocol: if other than simple it has priority over ros environment variable if (att.builtin.discovery_config.discoveryProtocol != DiscoveryProtocol_t::SIMPLE) { logInfo(DOMAIN, "Detected non simple discovery protocol attributes." << " Ignoring auto default client-server setup."); return nullptr; } // we only make the attributes copy when we are sure is worth RTPSParticipantAttributes client_att(att); // Retrieve the info from the environment variable if (!load_environment_server_info(client_att.builtin.discovery_config.m_DiscoveryServers)) { // it's not an error, the environment variable may not be set. Any issue with environment // variable syntax is logError already return nullptr; } logInfo(DOMAIN, "Detected auto client-server environment variable." "Trying to create client with the default server setup."); client_att.builtin.discovery_config.discoveryProtocol = DiscoveryProtocol_t::CLIENT; // RemoteServerAttributes already fill in above RTPSParticipant* part = RTPSDomain::createParticipant(domain_id, enabled, client_att, listen); if (nullptr != part) { // client successfully created logInfo(DOMAIN, "Auto default server-client setup. Default client created."); return part; } // unable to create auto server-client default participants logError(DOMAIN, "Auto default server-client setup. 
Unable to create the client."); return nullptr; } void RTPSDomainImpl::create_participant_guid( int32_t& participant_id, GUID_t& guid) { if (participant_id < 0) { std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex); do { participant_id = RTPSDomain::getNewId(); } while (RTPSDomain::m_RTPSParticipantIDs.find(participant_id) != RTPSDomain::m_RTPSParticipantIDs.end()); } guid_prefix_create(participant_id, guid.guidPrefix); guid.entityId = c_EntityId_RTPSParticipant; } RTPSParticipantImpl* RTPSDomainImpl::find_local_participant( const GUID_t& guid) { std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex); for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants) { if (participant.second->getGuid().guidPrefix == guid.guidPrefix) { // Participant found, forward the query return participant.second; } } return nullptr; } RTPSReader* RTPSDomainImpl::find_local_reader( const GUID_t& reader_guid) { std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex); for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants) { if (participant.second->getGuid().guidPrefix == reader_guid.guidPrefix) { // Participant found, forward the query return participant.second->find_local_reader(reader_guid); } } return nullptr; } RTPSWriter* RTPSDomainImpl::find_local_writer( const GUID_t& writer_guid) { std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex); for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants) { if (participant.second->getGuid().guidPrefix == writer_guid.guidPrefix) { // Participant found, forward the query return participant.second->find_local_writer(writer_guid); } } return nullptr; } /** * Check whether intraprocess delivery should be used between two GUIDs. * * @param local_guid GUID of the local endpoint performing the query. * @param matched_guid GUID being queried about. * * @returns true when intraprocess delivery is enabled, false otherwise. */ bool RTPSDomainImpl::should_intraprocess_between( const GUID_t& local_guid, const GUID_t& matched_guid) { if (!local_guid.is_on_same_process_as(matched_guid)) { // Not on the same process, should not use intraprocess mechanism. return false; } if (local_guid.entityId == c_EntityId_SPDPWriter || local_guid.entityId == c_EntityId_SPDPReader) { // Always disabled for PDP, to avoid inter-domain communications. return false; } switch (xmlparser::XMLProfileManager::library_settings().intraprocess_delivery) { case IntraprocessDeliveryType::INTRAPROCESS_FULL: return true; case IntraprocessDeliveryType::INTRAPROCESS_USER_DATA_ONLY: return !matched_guid.is_builtin(); case IntraprocessDeliveryType::INTRAPROCESS_OFF: default: break; } return false; } } // namespace rtps } // namespace fastrtps } // namespace eprosima
1
22,273
Shouldn't we then add `mp_impl = nullptr` to the destructor of `RTPSParticipant`?
eProsima-Fast-DDS
cpp
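A sketch of the follow-up the comment proposes (hypothetical, not upstream code): clearing the pointer at destruction so the new assert can distinguish an already-removed participant from a live one.

```cpp
RTPSParticipant::~RTPSParticipant()
{
    // Invalidate the implementation pointer so a later removeRTPSParticipant()
    // call trips the assert instead of dereferencing a dangling pointer.
    mp_impl = nullptr;
}
```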
@@ -5,10 +5,10 @@ import pytest from mmdet.datasets import DATASETS -@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock) -@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock) -@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock) -@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock) +@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock()) +@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock()) +@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock()) +@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock()) @patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
1
from unittest.mock import MagicMock, patch import pytest from mmdet.datasets import DATASETS @patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock) @patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock) @patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock) @patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock) @patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock) @pytest.mark.parametrize('dataset', ['CocoDataset', 'VOCDataset', 'CityscapesDataset']) def test_custom_classes_override_default(dataset): dataset_class = DATASETS.get(dataset) if dataset in ['CocoDataset', 'CityscapesDataset']: dataset_class.coco = MagicMock() dataset_class.cat_ids = MagicMock() original_classes = dataset_class.CLASSES # Test setting classes as a tuple custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=('bus', 'car'), test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ('bus', 'car') # Test setting classes as a list custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=['bus', 'car'], test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['bus', 'car'] # Test overriding not a subset custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=['foo'], test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['foo'] # Test default behavior custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=None, test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES == original_classes # Test sending file path import tempfile tmp_file = tempfile.NamedTemporaryFile() with open(tmp_file.name, 'w') as f: f.write('bus\ncar\n') custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=tmp_file.name, test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') tmp_file.close() assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['bus', 'car']
1
22,814
Are the additional brackets necessary?
open-mmlab-mmdetection
py
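The brackets do matter: patching with the class makes every call construct a fresh mock, while patching with an instance gives all calls one shared, configurable mock. A self-contained illustration:

```python
from unittest.mock import MagicMock, patch

class Dataset:
    def load_annotations(self):
        return "real"

# Patched with the *class*: each call invokes the MagicMock constructor,
# so you get a brand-new mock object per call.
with patch.object(Dataset, "load_annotations", MagicMock):
    a, b = Dataset().load_annotations(), Dataset().load_annotations()
    assert a is not b

# Patched with an *instance*: every call hits the same mock and returns
# its stable return_value, which is what the decorators above intend.
with patch.object(Dataset, "load_annotations", MagicMock()):
    a, b = Dataset().load_annotations(), Dataset().load_annotations()
    assert a is b
```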
@@ -148,7 +148,7 @@ public class Invoker implements InvocationHandler { } PojoConsumerOperationMeta pojoConsumerOperationMeta = consumerMeta - .findOperationMeta(MethodUtils.findSwaggerMethodName(method)); + .findOperationMeta(MethodUtils.findSwaggerMethodName(method), consumerIntf); if (pojoConsumerOperationMeta == null) { throw new IllegalStateException( String.format(
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.provider.pojo; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; import javax.ws.rs.core.Response.Status; import org.apache.commons.lang3.StringUtils; import org.apache.servicecomb.core.Invocation; import org.apache.servicecomb.core.SCBEngine; import org.apache.servicecomb.core.definition.MicroserviceMeta; import org.apache.servicecomb.core.definition.OperationMeta; import org.apache.servicecomb.core.definition.SchemaMeta; import org.apache.servicecomb.core.invocation.InvocationFactory; import org.apache.servicecomb.core.provider.consumer.InvokerUtils; import org.apache.servicecomb.core.provider.consumer.MicroserviceReferenceConfig; import org.apache.servicecomb.core.provider.consumer.ReferenceConfig; import org.apache.servicecomb.provider.pojo.definition.PojoConsumerMeta; import org.apache.servicecomb.provider.pojo.definition.PojoConsumerOperationMeta; import org.apache.servicecomb.swagger.engine.SwaggerConsumer; import org.apache.servicecomb.swagger.engine.SwaggerConsumerOperation; import org.apache.servicecomb.swagger.generator.core.utils.MethodUtils; import org.apache.servicecomb.swagger.invocation.Response; import org.apache.servicecomb.swagger.invocation.context.InvocationContextCompletableFuture; import org.apache.servicecomb.swagger.invocation.exception.ExceptionFactory; import org.apache.servicecomb.swagger.invocation.exception.InvocationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class Invoker implements InvocationHandler { private static final Logger LOGGER = LoggerFactory.getLogger(Invoker.class); protected SCBEngine scbEngine; protected String appId; protected String microserviceName; // can be null, should find SchemaMeta by consumerIntf in this time protected String schemaId; protected Class<?> consumerIntf; // not always equals codec meta // for highway, codec meta is relate to target instance // to avoid limit producer to only allow append parameter protected PojoConsumerMeta consumerMeta; @SuppressWarnings("unchecked") public static <T> T createProxy(String microserviceName, String schemaId, Class<?> consumerIntf) { Invoker invoker = new Invoker(microserviceName, schemaId, consumerIntf); return (T) Proxy.newProxyInstance(consumerIntf.getClassLoader(), new Class<?>[] {consumerIntf}, invoker); } public Invoker(String microserviceName, String schemaId, Class<?> consumerIntf) { this.microserviceName = microserviceName; this.schemaId = schemaId; this.consumerIntf = consumerIntf; } private void ensureStatusUp() { if (scbEngine == null) { if (SCBEngine.getInstance() == null) { String message = "The 
request is rejected. Cannot process the request due to SCBEngine not ready."; LOGGER.warn(message); throw new InvocationException(Status.SERVICE_UNAVAILABLE, message); } this.scbEngine = SCBEngine.getInstance(); this.appId = scbEngine.parseAppId(microserviceName); } scbEngine.ensureStatusUp(); } private boolean isNeedRefresh() { return consumerMeta == null || consumerMeta.isExpired(); } protected SchemaMeta findSchemaMeta(MicroserviceMeta microserviceMeta) { // if present schemaId, just use it if (StringUtils.isNotEmpty(schemaId)) { return microserviceMeta.findSchemaMeta(schemaId); } // not present schemaId, try interface first SchemaMeta schemaMeta = microserviceMeta.findSchemaMeta(consumerIntf); if (schemaMeta != null) { return schemaMeta; } // try interface name second return microserviceMeta.findSchemaMeta(consumerIntf.getName()); } private PojoConsumerMeta refreshMeta() { MicroserviceReferenceConfig microserviceReferenceConfig = scbEngine .createMicroserviceReferenceConfig(microserviceName); MicroserviceMeta microserviceMeta = microserviceReferenceConfig.getLatestMicroserviceMeta(); SchemaMeta schemaMeta = findSchemaMeta(microserviceMeta); if (schemaMeta == null) { throw new IllegalStateException( String.format( "Schema not exist, microserviceName=%s, schemaId=%s, consumer interface=%s; " + "new producer not running or not deployed.", microserviceName, schemaId, consumerIntf.getName())); } SwaggerConsumer swaggerConsumer = scbEngine.getSwaggerEnvironment() .createConsumer(consumerIntf, schemaMeta.getSwagger()); return new PojoConsumerMeta(microserviceReferenceConfig, swaggerConsumer, schemaMeta); } @Override public Object invoke(Object proxy, Method method, Object[] args) { ensureStatusUp(); if (isNeedRefresh()) { synchronized (this) { if (isNeedRefresh()) { this.consumerMeta = refreshMeta(); } } } PojoConsumerOperationMeta pojoConsumerOperationMeta = consumerMeta .findOperationMeta(MethodUtils.findSwaggerMethodName(method)); if (pojoConsumerOperationMeta == null) { throw new IllegalStateException( String.format( "Consumer method %s:%s not exist in contract, microserviceName=%s, schemaId=%s; " + "new producer not running or not deployed.", consumerIntf.getName(), method.getName(), microserviceName, schemaId)); } SwaggerConsumerOperation consumerOperation = pojoConsumerOperationMeta.getSwaggerConsumerOperation(); OperationMeta operationMeta = pojoConsumerOperationMeta.getOperationMeta(); Invocation invocation = InvocationFactory.forConsumer( findReferenceConfig(operationMeta), operationMeta, null); invocation.setResponsesMeta(pojoConsumerOperationMeta.getResponsesMeta()); Map<String, Object> invocationArguments = toArguments(method, args); invocation.setInvocationArguments(invocationArguments); if (CompletableFuture.class.equals(method.getReturnType())) { return completableFutureInvoke(invocation, consumerOperation); } return syncInvoke(invocation, consumerOperation); } public Map<String, Object> toArguments(Method method, Object[] args) { Map<String, Object> arguments = new HashMap<>(); for (int i = 0; i < method.getParameterCount(); i++) { arguments.put(method.getParameters()[i].getName(), args[i]); } return arguments; } protected ReferenceConfig findReferenceConfig(OperationMeta operationMeta) { return consumerMeta.getMicroserviceReferenceConfig().createReferenceConfig(operationMeta); } protected Object syncInvoke(Invocation invocation, SwaggerConsumerOperation consumerOperation) { Response response = InvokerUtils.innerSyncInvoke(invocation); if (response.isSuccessed()) { return 
consumerOperation.getResponseMapper().mapResponse(response); } throw ExceptionFactory.convertConsumerException(response.getResult()); } protected CompletableFuture<Object> completableFutureInvoke(Invocation invocation, SwaggerConsumerOperation consumerOperation) { CompletableFuture<Object> future = new InvocationContextCompletableFuture<>(invocation); InvokerUtils.reactiveInvoke(invocation, response -> { if (response.isSuccessed()) { Object result = consumerOperation.getResponseMapper().mapResponse(response); future.complete(result); return; } future.completeExceptionally(response.getResult()); }); return future; } }
1
11,708
consumerMeta belongs only to this invoker instance and relates only to this one consumerIntf class, so there seems to be no need to build a composite key?
apache-servicecomb-java-chassis
java
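The comment argues that, since each `Invoker` wraps exactly one consumer interface, the swagger method name alone is a sufficient lookup key. A minimal self-contained illustration of that point (all names hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

// One cache per invoker instance: the invoker is bound to a single
// consumer interface, so method names cannot collide across interfaces
// and no composite (name, interface) key is needed.
class OperationCache {
    private final Map<String, Object> byMethodName = new HashMap<>();

    void put(String swaggerMethodName, Object operationMeta) {
        byMethodName.put(swaggerMethodName, operationMeta);
    }

    Object find(String swaggerMethodName) {
        return byMethodName.get(swaggerMethodName);
    }
}
```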
@@ -35,6 +35,11 @@ RSpec.describe RSpec::Core::Formatters::BaseTextFormatter do expect(formatter_output.string).to match("1 example, 1 failure, 1 pending") end + it "with 1s outputs singular (only pending)" do + send_notification :dump_summary, summary_notification(1, examples(1), examples(0), examples(1), 0) + expect(formatter_output.string).to match("1 example, 0 failures, 1 pending") + end + it "with 2s outputs pluralized (including pending)" do send_notification :dump_summary, summary_notification(2, examples(2), examples(2), examples(2), 0) expect(formatter_output.string).to match("2 examples, 2 failures, 2 pending")
1
# encoding: utf-8 require 'rspec/core/formatters/base_text_formatter' RSpec.describe RSpec::Core::Formatters::BaseTextFormatter do include FormatterSupport context "when closing the formatter", :isolated_directory => true do let(:output_to_close) { File.new("./output_to_close", "w") } let(:formatter) { described_class.new(output_to_close) } it 'does not close an already closed output stream' do output_to_close.close expect { formatter.close(RSpec::Core::Notifications::NullNotification) }.not_to raise_error end it "flushes output before closing the stream so buffered bytes are not lost if we exit right away" do expect(output_to_close).to receive(:flush).ordered.and_call_original # Windows appears to not let the `:isolated_directory` shared group cleanup if # the file isn't closed, so we need to use `and_call_original` here. expect(output_to_close).to receive(:close).ordered.and_call_original formatter.close(RSpec::Core::Notifications::NullNotification) end end describe "#dump_summary" do it "with 0s outputs pluralized (excluding pending)" do send_notification :dump_summary, summary_notification(0, [], [], [], 0) expect(formatter_output.string).to match("0 examples, 0 failures") end it "with 1s outputs singular (including pending)" do send_notification :dump_summary, summary_notification(0, examples(1), examples(1), examples(1), 0) expect(formatter_output.string).to match("1 example, 1 failure, 1 pending") end it "with 2s outputs pluralized (including pending)" do send_notification :dump_summary, summary_notification(2, examples(2), examples(2), examples(2), 0) expect(formatter_output.string).to match("2 examples, 2 failures, 2 pending") end it 'with errors includes that count' do send_notification :dump_summary, summary_notification(2, examples(2), examples(2), examples(2), 0, 3) expect(formatter_output.string).to match("2 examples, 2 failures, 2 pending, 3 errors occurred outside of examples") end describe "rerun command for failed examples" do it "uses the location to identify the example" do line = __LINE__ + 2 example_group = RSpec.describe("example group") do it("fails") { fail } end expect(output_from_running example_group).to include("rspec #{RSpec::Core::Metadata::relative_path("#{__FILE__}:#{line}")} # example group fails") end context "for an example defined in an file required by the user rather than loaded by rspec" do it "looks through ancestor metadata to find a workable re-run command" do line = __LINE__ + 1 example_group = RSpec.describe("example group") do # Using eval in order to make it think this got defined in an external file. 
instance_eval "it('fails') { fail }", "some/external/file.rb", 1 end expect(output_from_running example_group).to include("rspec #{RSpec::Core::Metadata::relative_path("#{__FILE__}:#{line}")} # example group fails") end end context "for an example that is not uniquely identified by the location" do let(:example_group_in_this_file) { example_group_defined_in(__FILE__) } def example_group_defined_in(file) instance_eval <<-EOS, file, 1 $group = RSpec.describe("example group") do 1.upto(2) do |i| it("compares \#{i} against 2") { expect(i).to eq(2) } end end EOS $group end let(:id) { "#{RSpec::Core::Metadata::relative_path("#{__FILE__}")}[1:1]" } it "uses the id instead" do with_env_vars 'SHELL' => '/usr/local/bin/bash' do expect(output_from_running example_group_in_this_file).to include("rspec #{id} # example group compares 1 against 2") end end context "on a shell that may not handle unquoted ids" do around { |ex| with_env_vars('SHELL' => '/usr/local/bin/cash', &ex) } it 'quotes the id to be safe so the rerun command can be copied and pasted' do expect(output_from_running example_group_in_this_file).to include("rspec '#{id}'") end it 'correctly escapes file names that have quotes in them' do group_in_other_file = example_group_defined_in("./path/with'quote_spec.rb") expect(output_from_running group_in_other_file).to include("rspec './path/with\\'quote_spec.rb[1:1]'") end end end def output_from_running(example_group) allow(RSpec.configuration).to receive(:loaded_spec_files) { RSpec::Core::Set.new([File.expand_path(__FILE__)]) } example_group.run(reporter) examples = example_group.examples failed = examples.select { |e| e.execution_result.status == :failed } send_notification :dump_summary, summary_notification(1, examples, failed, [], 0) formatter_output.string end end end describe "#dump_failures" do let(:group) { RSpec.describe("group name") } before { allow(RSpec.configuration).to receive(:color_enabled?) 
{ false } } def run_all_and_dump_failures group.run(reporter) send_notification :dump_failures, failed_examples_notification end it "preserves formatting" do group.example("example name") { expect("this").to eq("that") } run_all_and_dump_failures expect(formatter_output.string).to match(/group name example name/m) expect(formatter_output.string).to match(/(\s+)expected: \"that\"\n\1 got: \"this\"/m) end context "with an exception without a message" do it "does not throw NoMethodError" do exception_without_message = Exception.new() allow(exception_without_message).to receive(:message) { nil } group.example("example name") { raise exception_without_message } expect { run_all_and_dump_failures }.not_to raise_error end it "preserves ancestry" do example = group.example("example name") { raise "something" } run_all_and_dump_failures expect(example.example_group.parent_groups.size).to eq 1 end end context "with an exception that has an exception instance as its message" do it "does not raise NoMethodError" do gonzo_exception = RuntimeError.new allow(gonzo_exception).to receive(:message) { gonzo_exception } group.example("example name") { raise gonzo_exception } expect { run_all_and_dump_failures }.not_to raise_error end end context "with an instance of an anonymous exception class" do it "substitutes '(anonymous error class)' for the missing class name" do exception = Class.new(StandardError).new group.example("example name") { raise exception } run_all_and_dump_failures expect(formatter_output.string).to include('(anonymous error class)') end end context "with an exception class other than RSpec" do it "does not show the error class" do group.example("example name") { raise NameError.new('foo') } run_all_and_dump_failures expect(formatter_output.string).to match(/NameError/m) end end if String.method_defined?(:encoding) context "with an exception that has a differently encoded message" do it "runs without encountering an encoding exception" do group.example("Mixing encodings, e.g. UTF-8: © and Binary") { raise "Error: \xC2\xA9".force_encoding("ASCII-8BIT") } run_all_and_dump_failures expect(formatter_output.string).to match(/RuntimeError:\n\s+Error: \?\?/m) # ?? 
because the characters dont encode properly end end end context "with a failed expectation (rspec-expectations)" do it "does not show the error class" do group.example("example name") { expect("this").to eq("that") } run_all_and_dump_failures expect(formatter_output.string).not_to match(/RSpec/m) end end context "with a failed message expectation (rspec-mocks)" do it "does not show the error class" do group.example("example name") { expect("this").to receive("that") } run_all_and_dump_failures expect(formatter_output.string).not_to match(/RSpec/m) end end %w[ include_examples it_should_behave_like ].each do |inclusion_method| context "for #shared_examples included using #{inclusion_method}" do it 'outputs the name and location' do group.shared_examples 'foo bar' do it("example name") { expect("this").to eq("that") } end line = __LINE__.next group.__send__(inclusion_method, 'foo bar') run_all_and_dump_failures expect(formatter_output.string.lines).to include(a_string_ending_with( 'Shared Example Group: "foo bar" called from ' + "#{RSpec::Core::Metadata.relative_path(__FILE__)}:#{line}\n" )) end context 'that contains nested example groups' do it 'outputs the name and location' do group.shared_examples 'foo bar' do describe 'nested group' do it("example name") { expect("this").to eq("that") } end end line = __LINE__.next group.__send__(inclusion_method, 'foo bar') run_all_and_dump_failures expect(formatter_output.string.lines).to include(a_string_ending_with( 'Shared Example Group: "foo bar" called from ' + "./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}\n" )) end end context "that contains shared group nesting" do it 'includes each inclusion location in the output' do group.shared_examples "inner" do example { expect(1).to eq(2) } end inner_line = __LINE__ + 2 group.shared_examples "outer" do __send__(inclusion_method, "inner") end outer_line = __LINE__ + 1 group.__send__(inclusion_method, 'outer') run_all_and_dump_failures expect(formatter_output.string.lines.grep(/Shared Example Group/)).to match [ a_string_ending_with( 'Shared Example Group: "inner" called from ' + "./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{inner_line}\n" ), a_string_ending_with( 'Shared Example Group: "outer" called from ' + "./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{outer_line}\n" ), ] end end end end end describe "custom_colors" do it "uses the custom success color" do RSpec.configure do |config| config.color_mode = :on config.success_color = :cyan end send_notification :dump_summary, summary_notification(0, examples(1), [], [], 0) expect(formatter_output.string).to include("\e[36m") end end end
1
16,039
This one is unrelated, right? (I don't mind including it, just making sure I understand.)
rspec-rspec-core
rb
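The new example pins down the singular/plural boundary. A toy sketch of the rule it exercises (a hypothetical helper, not rspec-core's implementation):

```ruby
# "example" and "failure" pluralize with the count; "pending" is invariant.
def count_phrase(word, count)
  count == 1 ? "#{count} #{word}" : "#{count} #{word}s"
end

summary = "#{count_phrase('example', 1)}, #{count_phrase('failure', 0)}, 1 pending"
# => "1 example, 0 failures, 1 pending"
```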
@@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', '../_base_/models/solo_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[9, 11])
1
1
23,516
[8, 11] actually achieves similar performance; we should use our default config if [9, 11] is unnecessary.
open-mmlab-mmdetection
py
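For comparison, the default 1x step schedule the reviewer prefers; the warmup values here are quoted from the `_base_` schedule as recalled and should be verified against the repository:

```python
# configs/_base_/schedules/schedule_1x.py (as recalled; verify before use)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
```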
@@ -0,0 +1,12 @@ +package yarpc + +import ( + "time" + + "github.com/yarpc/yarpc-go/encoding/raw" +) + +func SleepRaw(reqMeta *raw.ReqMeta, body []byte) ([]byte, *raw.ResMeta, error) { + time.Sleep(1 * time.Second) + return nil, nil, nil +}
1
1
9,854
For a separate PR: Can we make this a JSON/Thrift procedure instead? It could accept the amount of time it needs to sleep as an argument.
yarpc-yarpc-go
go
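A plain-Go sketch of the suggested follow-up, with the sleep duration as a request field. The wiring into yarpc's JSON encoding layer is omitted, since its exact handler signature is not shown here; the types below are hypothetical.

```go
package yarpc

import (
	"context"
	"time"
)

// SleepRequest carries the caller-chosen sleep duration (hypothetical type).
type SleepRequest struct {
	DurationMS int `json:"durationMs"`
}

type SleepResponse struct{}

// SleepJSON sleeps for the requested duration instead of a hard-coded 1s.
func SleepJSON(ctx context.Context, req *SleepRequest) (*SleepResponse, error) {
	time.Sleep(time.Duration(req.DurationMS) * time.Millisecond)
	return &SleepResponse{}, nil
}
```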
@@ -92,6 +92,7 @@ class Tab(browsertab.AbstractTab): pass + @pytest.mark.xfail(run=False, reason='Causes segfaults, see #1638') def test_tab(qtbot, view, config_stub, tab_registry, mode_manager): tab_w = Tab(win_id=0, mode_manager=mode_manager)
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. import pytest from qutebrowser.browser import browsertab pytestmark = pytest.mark.usefixtures('redirect_webengine_data') try: from PyQt5.QtWebKitWidgets import QWebView except ImportError: QWebView = None try: from PyQt5.QtWebEngineWidgets import QWebEngineView except ImportError: QWebEngineView = None @pytest.fixture(params=[QWebView, QWebEngineView]) def view(qtbot, config_stub, request): if request.param is None: pytest.skip("View not available") v = request.param() qtbot.add_widget(v) return v @pytest.fixture(params=['webkit', 'webengine']) def tab(request, qtbot, tab_registry, cookiejar_and_cache, mode_manager): if request.param == 'webkit': webkittab = pytest.importorskip('qutebrowser.browser.webkit.webkittab') tab_class = webkittab.WebKitTab elif request.param == 'webengine': webenginetab = pytest.importorskip( 'qutebrowser.browser.webengine.webenginetab') tab_class = webenginetab.WebEngineTab else: assert False t = tab_class(win_id=0, mode_manager=mode_manager) qtbot.add_widget(t) yield t class Zoom(browsertab.AbstractZoom): def _set_factor_internal(self, _factor): pass def factor(self): assert False class Tab(browsertab.AbstractTab): # pylint: disable=abstract-method def __init__(self, win_id, mode_manager, parent=None): super().__init__(win_id=win_id, mode_manager=mode_manager, parent=parent) self.history = browsertab.AbstractHistory(self) self.scroller = browsertab.AbstractScroller(self, parent=self) self.caret = browsertab.AbstractCaret(win_id=self.win_id, mode_manager=mode_manager, tab=self, parent=self) self.zoom = Zoom(win_id=self.win_id) self.search = browsertab.AbstractSearch(parent=self) self.printing = browsertab.AbstractPrinting() self.elements = browsertab.AbstractElements(self) self.action = browsertab.AbstractAction() def _install_event_filter(self): pass @pytest.mark.xfail(run=False, reason='Causes segfaults, see #1638') def test_tab(qtbot, view, config_stub, tab_registry, mode_manager): tab_w = Tab(win_id=0, mode_manager=mode_manager) qtbot.add_widget(tab_w) assert tab_w.win_id == 0 assert tab_w._widget is None tab_w._set_widget(view) assert tab_w._widget is view assert tab_w.history._tab is tab_w assert tab_w.history._history is view.history() assert view.parent() is tab_w with qtbot.waitExposed(tab_w): tab_w.show()
1
19,436
Please remove this blank line.
qutebrowser-qutebrowser
py
@@ -26,6 +26,7 @@ use Front\Front; use Propel\Runtime\Exception\PropelException; use Symfony\Component\HttpFoundation\Cookie; use Symfony\Component\HttpFoundation\Request; +use Thelia\Cart\CartTrait; use Thelia\Controller\Front\BaseFrontController; use Thelia\Core\Event\Cart\CartEvent; use Thelia\Core\Event\Order\OrderEvent;
1
<?php /*************************************************************************************/ /* */ /* Thelia */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 3 of the License */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* */ /*************************************************************************************/ namespace Front\Controller; use Front\Front; use Propel\Runtime\Exception\PropelException; use Symfony\Component\HttpFoundation\Cookie; use Symfony\Component\HttpFoundation\Request; use Thelia\Controller\Front\BaseFrontController; use Thelia\Core\Event\Cart\CartEvent; use Thelia\Core\Event\Order\OrderEvent; use Thelia\Core\Event\TheliaEvents; use Thelia\Form\CartAdd; use Thelia\Form\Exception\FormValidationException; use Thelia\Log\Tlog; use Thelia\Model\AddressQuery; use Thelia\Model\ConfigQuery; use Thelia\Module\Exception\DeliveryException; use Thelia\Tools\URL; class CartController extends BaseFrontController { public function addItem() { $request = $this->getRequest(); $cartAdd = $this->getAddCartForm($request); $message = null; try { $form = $this->validateForm($cartAdd); $cartEvent = $this->getCartEvent(); $cartEvent->bindForm($form); $this->getDispatcher()->dispatch(TheliaEvents::CART_ADDITEM, $cartEvent); $this->afterModifyCart(); if (null !== $response = $this->generateSuccessRedirect($cartAdd)) { return $response; } } catch (PropelException $e) { Tlog::getInstance()->error(sprintf("Failed to add item to cart with message : %s", $e->getMessage())); $message = $this->getTranslator()->trans( "Failed to add this article to your cart, please try again", [], Front::MESSAGE_DOMAIN ); } catch (FormValidationException $e) { $message = $e->getMessage(); } // If Ajax Request if ($this->getRequest()->isXmlHttpRequest()) { $request = $this->getRequest(); $request->attributes->set('_view', "includes/mini-cart"); } if ($message) { $cartAdd->setErrorMessage($message); $this->getParserContext()->addForm($cartAdd); } } public function changeItem() { $cartEvent = $this->getCartEvent(); $cartEvent->setCartItem($this->getRequest()->get("cart_item")); $cartEvent->setQuantity($this->getRequest()->get("quantity")); try { $this->getTokenProvider()->checkToken( $this->getRequest()->query->get('_token') ); $this->dispatch(TheliaEvents::CART_UPDATEITEM, $cartEvent); $this->afterModifyCart(); if (null !== $response = $this->generateSuccessRedirect()) { return $response; } } catch (PropelException $e) { $this->getParserContext()->setGeneralError($e->getMessage()); } } public function deleteItem() { $cartEvent = $this->getCartEvent(); $cartEvent->setCartItem($this->getRequest()->get("cart_item")); try { $this->getTokenProvider()->checkToken( $this->getRequest()->query->get('_token') ); $this->getDispatcher()->dispatch(TheliaEvents::CART_DELETEITEM, $cartEvent); $this->afterModifyCart(); if (null !== $response = $this->generateSuccessRedirect()) { return $response; } } catch 
(PropelException $e) { Tlog::getInstance()->error(sprintf("error during deleting cartItem with message : %s", $e->getMessage())); $this->getParserContext()->setGeneralError($e->getMessage()); } } public function changeCountry() { $redirectUrl = URL::getInstance()->absoluteUrl("/cart"); $deliveryId = $this->getRequest()->get("country"); $cookieName = ConfigQuery::read('front_cart_country_cookie_name', 'fcccn'); $cookieExpires = ConfigQuery::read('front_cart_country_cookie_expires', 2592000); $cookieExpires = intval($cookieExpires) ?: 2592000; $cookie = new Cookie($cookieName, $deliveryId, time() + $cookieExpires, '/'); $response = $this->generateRedirect($redirectUrl); $response->headers->setCookie($cookie); return $response; } /** * @return \Thelia\Core\Event\Cart\CartEvent */ protected function getCartEvent() { $cart = $this->getSession()->getSessionCart($this->getDispatcher()); return new CartEvent($cart); } /** * Find the good way to construct the cart form * * @param Request $request * @return CartAdd */ private function getAddCartForm(Request $request) { if ($request->isMethod("post")) { $cartAdd = new CartAdd($request); } else { $cartAdd = new CartAdd( $request, "form", array(), array( 'csrf_protection' => false, ) ); } return $cartAdd; } protected function afterModifyCart() { /* recalculate postage amount */ $order = $this->getSession()->getOrder(); if (null !== $order) { $deliveryModule = $order->getModuleRelatedByDeliveryModuleId(); $deliveryAddress = AddressQuery::create()->findPk($order->getChoosenDeliveryAddress()); if (null !== $deliveryModule && null !== $deliveryAddress) { $moduleInstance = $deliveryModule->getModuleInstance($this->container); $orderEvent = new OrderEvent($order); try { $postage = $moduleInstance->getPostage($deliveryAddress->getCountry()); $orderEvent->setPostage($postage); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_POSTAGE, $orderEvent); } catch (DeliveryException $ex) { // The postage has been chosen, but changes in the cart causes an exception. // Reset the postage data in the order $orderEvent->setDeliveryModule(0); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_DELIVERY_MODULE, $orderEvent); } } } } }
1
10,655
The CartTrait import is not used anymore.
thelia-thelia
php
@@ -82,6 +82,7 @@ namespace OpenTelemetry.Trace.Configuration /// Creates tracerSdk factory. /// </summary> /// <param name="configure">Function that configures tracerSdk factory.</param> + /// <returns><see cref="TracerFactory"/>.</returns> public static TracerFactory Create(Action<TracerBuilder> configure) { if (configure == null)
1
// <copyright file="TracerFactory.cs" company="OpenTelemetry Authors"> // Copyright 2018, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Linq; using OpenTelemetry.Context.Propagation; using OpenTelemetry.Resources; using OpenTelemetry.Trace.Export; using OpenTelemetry.Trace.Export.Internal; using OpenTelemetry.Trace.Samplers; namespace OpenTelemetry.Trace.Configuration { public class TracerFactory : TracerFactoryBase, IDisposable { private readonly object lck = new object(); private readonly Dictionary<TracerRegistryKey, Tracer> tracerRegistry = new Dictionary<TracerRegistryKey, Tracer>(); private readonly List<object> collectors = new List<object>(); private readonly Sampler sampler; private readonly Resource defaultResource; private readonly TracerConfiguration configurationOptions; private readonly SpanProcessor spanProcessor; private Tracer defaultTracer; private TracerFactory(TracerBuilder builder) { this.sampler = builder.Sampler ?? new AlwaysOnSampler(); this.defaultResource = builder.Resource; this.configurationOptions = builder.TracerConfigurationOptions ?? new TracerConfiguration(); if (builder.ProcessingPipelines == null || !builder.ProcessingPipelines.Any()) { // if there are no pipelines are configured, use noop processor this.spanProcessor = new NoopSpanProcessor(); } else if (builder.ProcessingPipelines.Count == 1) { // if there is only one pipeline - use it's outer processor as a // single processor on the tracerSdk. var processorFactory = builder.ProcessingPipelines[0]; this.spanProcessor = processorFactory.Build(); } else { // if there are more pipelines, use processor that will broadcast to all pipelines var processors = new SpanProcessor[builder.ProcessingPipelines.Count]; for (int i = 0; i < builder.ProcessingPipelines.Count; i++) { processors[i] = builder.ProcessingPipelines[i].Build(); } this.spanProcessor = new BroadcastProcessor(processors); } this.defaultTracer = new TracerSdk( this.spanProcessor, this.sampler, this.configurationOptions, this.defaultResource); } /// <summary> /// Creates tracerSdk factory. 
/// </summary> /// <param name="configure">Function that configures tracerSdk factory.</param> public static TracerFactory Create(Action<TracerBuilder> configure) { if (configure == null) { throw new ArgumentNullException(nameof(configure)); } var builder = new TracerBuilder(); configure(builder); var factory = new TracerFactory(builder); if (builder.CollectorFactories != null) { foreach (var collector in builder.CollectorFactories) { var tracer = factory.GetTracer(collector.Name, collector.Version); factory.collectors.Add(collector.Factory(tracer)); } } return factory; } public override Tracer GetTracer(string name, string version = null) { if (string.IsNullOrEmpty(name)) { return this.defaultTracer; } lock (this.lck) { var key = new TracerRegistryKey(name, version); if (!this.tracerRegistry.TryGetValue(key, out var tracer)) { tracer = this.defaultTracer = new TracerSdk( this.spanProcessor, this.sampler, this.configurationOptions, this.defaultResource.Merge(new Resource(CreateLibraryResourceLabels(name, version)))); this.tracerRegistry.Add(key, tracer); } return tracer; } } public void Dispose() { foreach (var item in this.collectors) { if (item is IDisposable disposable) { disposable.Dispose(); } } this.collectors.Clear(); if (this.spanProcessor is IDisposable disposableProcessor) { disposableProcessor.Dispose(); } } private static IEnumerable<KeyValuePair<string, object>> CreateLibraryResourceLabels(string name, string version) { var attributes = new Dictionary<string, object> { [Resource.LibraryNameKey] = name }; if (!string.IsNullOrEmpty(version)) { attributes.Add(Resource.LibraryVersionKey, version); } return attributes; } private readonly struct TracerRegistryKey { private readonly string name; private readonly string version; internal TracerRegistryKey(string name, string version) { this.name = name; this.version = version; } } } }
1
13,533
Can you please make the message more human-friendly?
open-telemetry-opentelemetry-dotnet
.cs
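One possible more human-friendly wording for the added <returns> tag is sketched below; only the XML doc fragment and the method signature are shown, and the exact phrasing is an assumption, not the wording the maintainers settled on.

/// <summary>
/// Creates tracerSdk factory.
/// </summary>
/// <param name="configure">Function that configures tracerSdk factory.</param>
/// <returns>A configured <see cref="TracerFactory"/> instance.</returns>
public static TracerFactory Create(Action<TracerBuilder> configure)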
@@ -23,10 +23,12 @@ import os import sys import html import netrc -from typing import Callable, Mapping, List, Optional import tempfile +from enum import Enum, unique +from typing import Callable, Mapping, List, Optional from PyQt5.QtCore import QUrl +from PyQt5.QtWebEngineWidgets import QWebEnginePage from qutebrowser.config import config from qutebrowser.utils import (usertypes, message, log, objreg, jinja, utils,
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2021 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <https://www.gnu.org/licenses/>. """Various utilities shared between webpage/webview subclasses.""" import os import sys import html import netrc from typing import Callable, Mapping, List, Optional import tempfile from PyQt5.QtCore import QUrl from qutebrowser.config import config from qutebrowser.utils import (usertypes, message, log, objreg, jinja, utils, qtutils) from qutebrowser.mainwindow import mainwindow from qutebrowser.misc import guiprocess class CallSuper(Exception): """Raised when the caller should call the superclass instead.""" def custom_headers(url): """Get the combined custom headers.""" headers = {} dnt_config = config.instance.get('content.headers.do_not_track', url=url) if dnt_config is not None: dnt = b'1' if dnt_config else b'0' headers[b'DNT'] = dnt conf_headers = config.instance.get('content.headers.custom', url=url) for header, value in conf_headers.items(): headers[header.encode('ascii')] = value.encode('ascii') accept_language = config.instance.get('content.headers.accept_language', url=url) if accept_language is not None: headers[b'Accept-Language'] = accept_language.encode('ascii') return sorted(headers.items()) def authentication_required(url, authenticator, abort_on): """Ask a prompt for an authentication question.""" realm = authenticator.realm() if realm: msg = '<b>{}</b> says:<br/>{}'.format( html.escape(url.toDisplayString()), html.escape(realm)) else: msg = '<b>{}</b> needs authentication'.format( html.escape(url.toDisplayString())) urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) answer = message.ask(title="Authentication required", text=msg, mode=usertypes.PromptMode.user_pwd, abort_on=abort_on, url=urlstr) if answer is not None: authenticator.setUser(answer.user) authenticator.setPassword(answer.password) return answer def _format_msg(msg: str) -> str: """Convert message to HTML suitable for rendering.""" return html.escape(msg).replace('\n', '<br />') def javascript_confirm(url, js_msg, abort_on): """Display a javascript confirm prompt.""" log.js.debug("confirm: {}".format(js_msg)) if config.val.content.javascript.modal_dialog: raise CallSuper msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), _format_msg(js_msg)) urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) ans = message.ask('Javascript confirm', msg, mode=usertypes.PromptMode.yesno, abort_on=abort_on, url=urlstr) return bool(ans) def javascript_prompt(url, js_msg, default, abort_on): """Display a javascript prompt.""" log.js.debug("prompt: {}".format(js_msg)) if config.val.content.javascript.modal_dialog: raise CallSuper if not config.val.content.javascript.prompt: return (False, "") msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()), _format_msg(js_msg)) urlstr = 
url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) answer = message.ask('Javascript prompt', msg, mode=usertypes.PromptMode.text, default=default, abort_on=abort_on, url=urlstr) if answer is None: return (False, "") else: return (True, answer) def javascript_alert(url, js_msg, abort_on): """Display a javascript alert.""" log.js.debug("alert: {}".format(js_msg)) if config.val.content.javascript.modal_dialog: raise CallSuper if not config.val.content.javascript.alert: return msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), _format_msg(js_msg)) urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert, abort_on=abort_on, url=urlstr) # Needs to line up with the values allowed for the # content.javascript.log setting. _JS_LOGMAP: Mapping[str, Callable[[str], None]] = { 'none': lambda arg: None, 'debug': log.js.debug, 'info': log.js.info, 'warning': log.js.warning, 'error': log.js.error, } def javascript_log_message(level, source, line, msg): """Display a JavaScript log message.""" logstring = "[{}:{}] {}".format(source, line, msg) logger = _JS_LOGMAP[config.cache['content.javascript.log'][level.name]] logger(logstring) def ignore_certificate_errors(url, errors, abort_on): """Display a certificate error question. Args: url: The URL the errors happened in errors: A list of QSslErrors or QWebEngineCertificateErrors Return: True if the error should be ignored, False otherwise. """ ssl_strict = config.instance.get('content.ssl_strict', url=url) log.network.debug("Certificate errors {!r}, strict {}".format( errors, ssl_strict)) for error in errors: assert error.is_overridable(), repr(error) if ssl_strict == 'ask': err_template = jinja.environment.from_string(""" Errors while loading <b>{{url.toDisplayString()}}</b>:<br/> <ul> {% for err in errors %} <li>{{err}}</li> {% endfor %} </ul> """.strip()) msg = err_template.render(url=url, errors=errors) urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) ignore = message.ask(title="Certificate errors - continue?", text=msg, mode=usertypes.PromptMode.yesno, default=False, abort_on=abort_on, url=urlstr) if ignore is None: # prompt aborted ignore = False return ignore elif ssl_strict is False: log.network.debug("ssl_strict is False, only warning about errors") for err in errors: # FIXME we might want to use warn here (non-fatal error) # https://github.com/qutebrowser/qutebrowser/issues/114 message.error('Certificate error: {}'.format(err)) return True elif ssl_strict is True: return False else: raise ValueError("Invalid ssl_strict value {!r}".format(ssl_strict)) raise utils.Unreachable def feature_permission(url, option, msg, yes_action, no_action, abort_on, blocking=False): """Handle a feature permission request. Args: url: The URL the request was done for. option: An option name to check. msg: A string like "show notifications" yes_action: A callable to call if the request was approved no_action: A callable to call if the request was denied abort_on: A list of signals which interrupt the question. blocking: If True, ask a blocking question. Return: The Question object if a question was asked (and blocking=False), None otherwise. 
""" config_val = config.instance.get(option, url=url) if config_val == 'ask': if url.isValid(): urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded) text = "Allow the website at <b>{}</b> to {}?".format( html.escape(url.toDisplayString()), msg) else: urlstr = None option = None # For message.ask/confirm_async text = "Allow the website to {}?".format(msg) if blocking: answer = message.ask(abort_on=abort_on, title='Permission request', text=text, url=urlstr, option=option, mode=usertypes.PromptMode.yesno) if answer: yes_action() else: no_action() return None else: return message.confirm_async( yes_action=yes_action, no_action=no_action, cancel_action=no_action, abort_on=abort_on, title='Permission request', text=text, url=urlstr, option=option) elif config_val: yes_action() return None else: no_action() return None def get_tab(win_id, target): """Get a tab widget for the given usertypes.ClickTarget. Args: win_id: The window ID to open new tabs in target: A usertypes.ClickTarget """ if target == usertypes.ClickTarget.tab: bg_tab = False elif target == usertypes.ClickTarget.tab_bg: bg_tab = True elif target == usertypes.ClickTarget.window: tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) window = mainwindow.MainWindow(private=tabbed_browser.is_private) window.show() win_id = window.win_id bg_tab = False else: raise ValueError("Invalid ClickTarget {}".format(target)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) return tabbed_browser.tabopen(url=None, background=bg_tab) def get_user_stylesheet(searching=False): """Get the combined user-stylesheet.""" css = '' stylesheets = config.val.content.user_stylesheets for filename in stylesheets: with open(filename, 'r', encoding='utf-8') as f: css += f.read() setting = config.val.scrolling.bar if setting == 'overlay' and utils.is_mac: setting = 'when-searching' if setting == 'never' or setting == 'when-searching' and not searching: css += '\nhtml > ::-webkit-scrollbar { width: 0px; height: 0px; }' return css def netrc_authentication(url, authenticator): """Perform authorization using netrc. Args: url: The URL the request was done for. authenticator: QAuthenticator object used to set credentials provided. Return: True if netrc found credentials for the URL. False otherwise. """ if 'HOME' not in os.environ: # We'll get an OSError by netrc if 'HOME' isn't available in # os.environ. We don't want to log that, so we prevent it # altogether. return False user = None password = None authenticators = None try: net = netrc.netrc(config.val.content.netrc_file) if url.port() != -1: authenticators = net.authenticators( "{}:{}".format(url.host(), url.port())) if not authenticators: authenticators = net.authenticators(url.host()) if authenticators: user, _account, password = authenticators except FileNotFoundError: log.misc.debug("No .netrc file found") except OSError as e: log.misc.exception("Unable to read the netrc file: {}".format(e)) except netrc.NetrcParseError as e: log.misc.exception("Error when parsing the netrc file: {}".format(e)) if user is None: return False authenticator.setUser(user) authenticator.setPassword(password) return True def choose_file(multiple: bool) -> List[str]: """Select file(s) for uploading, using external command defined in config. Args: multiple: Should selecting multiple files be allowed. Return: A list of selected file paths, or empty list if no file is selected. If multiple is False, the return value will have at most 1 item. 
""" if multiple: command = config.val.fileselect.multiple_files.command else: command = config.val.fileselect.single_file.command use_tmp_file = any('{}' in arg for arg in command[1:]) if use_tmp_file: handle = tempfile.NamedTemporaryFile( prefix='qutebrowser-fileselect-', delete=False, ) handle.close() tmpfilename = handle.name with utils.cleanup_file(tmpfilename): command = ( command[:1] + [arg.replace('{}', tmpfilename) for arg in command[1:]] ) return _execute_fileselect_command( command=command, multiple=multiple, tmpfilename=tmpfilename, ) else: return _execute_fileselect_command( command=command, multiple=multiple, ) def _execute_fileselect_command( command: List[str], multiple: bool, tmpfilename: Optional[str] = None ) -> List[str]: """Execute external command to choose file. Args: multiple: Should selecting multiple files be allowed. tmpfilename: Path to the temporary file if used, otherwise None. Return: A list of selected file paths, or empty list if no file is selected. If multiple is False, the return value will have at most 1 item. """ proc = guiprocess.GUIProcess(what='choose-file') proc.start(command[0], command[1:]) loop = qtutils.EventLoop() proc.finished.connect(lambda _code, _status: loop.exit()) loop.exec() if tmpfilename is None: selected_files = proc.final_stdout.splitlines() else: try: with open(tmpfilename, mode='r', encoding=sys.getfilesystemencoding()) as f: selected_files = f.read().splitlines() except OSError as e: message.error(f"Failed to open tempfile {tmpfilename} ({e})!") selected_files = [] if not multiple: if len(selected_files) > 1: message.warning("More than one file chosen, using only the first") return selected_files[:1] return selected_files
1
25,938
Please do `import enum` instead, then use `enum.Enum` and `enum.unique` - we do this everywhere to see where things are coming from, except for Qt (everything begins with a `Q` anyway) and typing (mostly used in type annotations, so it's clear without the namespacing).
qutebrowser-qutebrowser
py
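A minimal sketch of the import convention the reviewer describes; the class and its members are hypothetical and exist only to show the module-qualified `enum.Enum`/`enum.unique` style.

import enum


@enum.unique
class ExampleMode(enum.Enum):  # hypothetical name, for illustration only

    """Example of the project-wide `import enum` style.

    Qualifying Enum, unique and auto with the module name makes it
    obvious at each use site where the names come from.
    """

    first = enum.auto()
    second = enum.auto()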
@@ -46,7 +46,6 @@ #include "codec.h" -#define ENCFAIL (uint)0 /* a value that is not a valid instruction */ /* Decode immediate argument of bitwise operations. * Returns zero if the encoding is invalid.
1
/* ********************************************************** * Copyright (c) 2016 ARM Limited. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of ARM Limited nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* AArch64 decoder and encoder functions. * This file is rather large and should perhaps be split up, but there are many * opportunities for inlining which could be lost if it were split into separate * translation units, and it is helpful to have the per-operand-type decode/encode * functions next to each other. */ #include "../globals.h" #include "arch.h" #include "decode.h" #include "disassemble.h" #include "instr.h" #include "instr_create.h" #include "codec.h" #define ENCFAIL (uint)0 /* a value that is not a valid instruction */ /* Decode immediate argument of bitwise operations. * Returns zero if the encoding is invalid. */ static ptr_uint_t decode_bitmask(uint enc) { uint pos = enc >> 6 & 63; uint len = enc & 63; ptr_uint_t x; if (TEST(1U << 12, enc)) { if (len == 63) return 0; x = ((ptr_uint_t)1 << (len + 1)) - 1; return x >> pos | x << 1 << (63 - pos); } else { uint i, t = 32; while ((t & len) != 0) t >>= 1; if (t < 2) return 0; x = len & (t - 1); if (x == t - 1) return 0; x = ((ptr_uint_t)1 << (x + 1)) - 1; pos &= t - 1; x = x >> pos | x << (t - pos); for (i = 2; i < 64; i *= 2) { if (t <= i) x |= x << i; } return x; } } /* Encode immediate argument of bitwise operations. * Returns -1 if the value cannot be encoded. 
*/ static int encode_bitmask(ptr_uint_t x) { int neg, rep, pos, len; neg = 0; if ((x & 1) != 0) neg = 1, x = ~x; if (x == 0) return -1; if (x >> 2 == (x & (((ptr_uint_t)1 << (64 - 2)) - 1))) rep = 2, x &= ((ptr_uint_t)1 << 2) - 1; else if (x >> 4 == (x & (((ptr_uint_t)1 << (64 - 4)) - 1))) rep = 4, x &= ((ptr_uint_t)1 << 4) - 1; else if (x >> 8 == (x & (((ptr_uint_t)1 << (64 - 8)) - 1))) rep = 8, x &= ((ptr_uint_t)1 << 8) - 1; else if (x >> 16 == (x & (((ptr_uint_t)1 << (64 - 16)) - 1))) rep = 16, x &= ((ptr_uint_t)1 << 16) - 1; else if (x >> 32 == (x & (((ptr_uint_t)1 << (64 - 32)) - 1))) rep = 32, x &= ((ptr_uint_t)1 << 32) - 1; else rep = 64; pos = 0; (x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, pos += 32); (x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, pos += 16); (x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, pos += 8); (x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, pos += 4); (x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, pos += 2); (x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, pos += 1); len = 0; (~x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, len += 32); (~x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, len += 16); (~x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, len += 8); (~x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, len += 4); (~x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, len += 2); (~x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, len += 1); if (x != 0) return -1; if (neg) { pos = (pos + len) & (rep - 1); len = rep - len; } return (0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) | ((rep - pos) & (rep - 1)) << 6 | (len - 1); } /* Extract signed integer from subfield of word. */ static inline ptr_int_t extract_int(uint enc, int pos, int len) { uint u = ((enc >> pos & (((uint)1 << (len - 1)) - 1)) - (enc >> pos & ((uint)1 << (len - 1)))); return u << 1 < u ? -(ptr_int_t)~u - 1 : u; } /* Extract unsigned integer from subfield of word. */ static inline ptr_uint_t extract_uint(uint enc, int pos, int len) { return enc >> pos & (((uint)1 << len) - 1); } static inline bool try_encode_int(OUT uint *bits, int len, int scale, ptr_int_t val) { /* If any of lowest 'scale' bits are set, or 'val' is out of range, fail. 
*/ if (((ptr_uint_t)val & ((1U << scale) - 1)) != 0 || val < -((ptr_int_t)1 << (len + scale - 1)) || val >= (ptr_int_t)1 << (len + scale - 1)) return false; *bits = (ptr_uint_t)val >> scale & ((1U << len) - 1); return true; } static inline bool try_encode_imm(OUT uint *imm, int bits, opnd_t opnd) { ptr_int_t value; if (!opnd_is_immed_int(opnd)) return false; value = opnd_get_immed_int(opnd); if (!(0 <= value && value < (uint)1 << bits)) return false; *imm = value; return true; } static inline bool encode_pc_off(OUT uint *poff, int bits, byte *pc, instr_t *instr, opnd_t opnd) { ptr_uint_t off, range; ASSERT(0 < bits && bits <= 32); if (opnd.kind == PC_kind) off = opnd.value.pc - pc; else if (opnd.kind == INSTR_kind) off = (byte *)opnd_get_instr(opnd)->note - (byte *)instr->note; else return false; range = (ptr_uint_t)1 << bits; if (TEST(~((range - 1) << 2), off + (range << 1))) return false; *poff = off >> 2 & (range - 1); return true; } static inline opnd_t decode_sysreg(uint imm15) { reg_t sysreg; switch (imm15) { case 0x5a10: sysreg = DR_REG_NZCV; break; case 0x5a20: sysreg = DR_REG_FPCR; break; case 0x5a21: sysreg = DR_REG_FPSR; break; case 0x5e82: sysreg = DR_REG_TPIDR_EL0; break; default: return opnd_create_immed_uint(imm15, OPSZ_2); } return opnd_create_reg(sysreg); } static inline bool encode_sysreg(OUT uint *imm15, opnd_t opnd) { if (opnd_is_reg(opnd)) { switch (opnd_get_reg(opnd)) { case DR_REG_NZCV: *imm15 = 0x5a10; break; case DR_REG_FPCR: *imm15 = 0x5a20; break; case DR_REG_FPSR: *imm15 = 0x5a21; break; case DR_REG_TPIDR_EL0: *imm15 = 0x5e82; break; default: return false; } return true; } if (opnd_is_immed_int(opnd)) { uint imm; if (try_encode_imm(&imm, 15, opnd) && !opnd_is_reg(decode_sysreg(imm))) { *imm15 = imm; return true; } return false; } return false; } /* Decode integer register. Input 'n' is number from 0 to 31, where * 31 can mean stack pointer or zero register, depending on 'is_sp'. */ static inline reg_id_t decode_reg(uint n, bool is_x, bool is_sp) { return (n < 31 ? (is_x ? DR_REG_X0 : DR_REG_W0) + n : is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP) : (is_x ? DR_REG_XZR : DR_REG_WZR)); } /* Encode integer register. */ static inline bool encode_reg(OUT uint *num, OUT bool *is_x, reg_id_t reg, bool is_sp) { if (DR_REG_X0 <= reg && reg <= DR_REG_X30) { *num = reg - DR_REG_X0; *is_x = true; return true; } if (DR_REG_W0 <= reg && reg <= DR_REG_W30) { *num = reg - DR_REG_W0; *is_x = false; return true; } if (is_sp && (reg == DR_REG_XSP || reg == DR_REG_WSP)) { *num = 31; *is_x = (reg == DR_REG_XSP); return true; } if (!is_sp && (reg == DR_REG_XZR || reg == DR_REG_WZR)) { *num = 31; *is_x = (reg == DR_REG_XZR); return true; } return false; } /* Decode SIMD/FP register. */ static inline opnd_t decode_vreg(uint scale, uint n) { reg_id_t reg = DR_REG_NULL; ASSERT(n < 32 && scale < 5); switch (scale) { case 0: reg = DR_REG_B0 + n; break; case 1: reg = DR_REG_H0 + n; break; case 2: reg = DR_REG_S0 + n; break; case 3: reg = DR_REG_D0 + n; break; case 4: reg = DR_REG_Q0 + n; break; } return opnd_create_reg(reg); } /* Encode SIMD/FP register. 
*/ static inline bool encode_vreg(INOUT opnd_size_t *x, OUT uint *r, opnd_t opnd) { reg_id_t reg; opnd_size_t sz; uint n; if (!opnd_is_reg(opnd)) return false; reg = opnd_get_reg(opnd); if ((uint)(reg - DR_REG_B0) < 32) { n = reg - DR_REG_B0; sz = OPSZ_1; } else if ((uint)(reg - DR_REG_H0) < 32) { n = reg - DR_REG_H0; sz = OPSZ_2; } else if ((uint)(reg - DR_REG_S0) < 32) { n = reg - DR_REG_S0; sz = OPSZ_4; } else if ((uint)(reg - DR_REG_D0) < 32) { n = reg - DR_REG_D0; sz = OPSZ_8; } else if ((uint)(reg - DR_REG_Q0) < 32) { n = reg - DR_REG_Q0; sz = OPSZ_16; } else return false; if (*x == OPSZ_NA) *x = sz; else if (*x != sz) return false; *r = n; return true; } static opnd_t create_base_imm(uint enc, int disp, int bytes) { /* The base register number comes from bits 5 to 9. It may be SP. */ return opnd_create_base_disp(decode_reg(extract_uint(enc, 5, 5), true, true), DR_REG_NULL, 0, disp, opnd_size_from_bytes(bytes)); } static bool is_base_imm(opnd_t opnd, OUT uint *regnum) { uint n; bool is_x; if (!opnd_is_base_disp(opnd) || opnd_get_index(opnd) != DR_REG_NULL || !encode_reg(&n, &is_x, opnd_get_base(opnd), true) || !is_x) return false; *regnum = n; return true; } /* Used for mem7* operand types, which have a 7-bit offset and are used by * load/store (pair) instructions. Returns the scale (log base 2 of number * of bytes) of the memory argument, a function of bits 26, 30 and 31. */ static int mem7_scale(uint enc) { return 2 + (TEST(1U << 26, enc) ? extract_uint(enc, 30, 2) : extract_uint(enc, 31, 1)); } /* Used for memlit operand type, used by load (literal). Returns the size * of the memory operand, a function of bits 26, 30 and 31. */ static opnd_size_t memlit_size(uint enc) { opnd_size_t size = OPSZ_0; switch (extract_uint(enc, 30, 2)) { case 0: size = OPSZ_4; break; case 1: size = OPSZ_8; break; case 2: size = TEST(1U << 26, enc) ? OPSZ_16 : OPSZ_4; } return size; } /* Returns the number of registers accessed by SIMD load structure and replicate, * a function of bits 13 and 21. */ static int memvr_regcount(uint enc) { return ((enc >> 13 & 1) << 1 | (enc >> 21 & 1)) + 1; } /* Used for memvs operand type, used by SIMD load/store single structure. * Returns the number of bytes read or written, which is a function of * bits 10, 11, 13, 14, 15 and 21. */ static int memvs_size(uint enc) { int scale = extract_uint(enc, 14, 2); /* Number of elements in structure, 1 to 4. */ int elems = memvr_regcount(enc); int size = extract_uint(enc, 10, 2); if (scale == 2 && size == 1) scale = 3; return elems * (1 << scale); } /* Returns the number of registers accessed by SIMD load/store multiple structures, * a function of bits 12-15. */ static int multistruct_regcount(uint enc) { switch (extract_uint(enc, 12, 4)) { case 0: return 4; case 2: return 4; case 4: return 3; case 6: return 3; case 7: return 1; case 8: return 2; case 10: return 2; } ASSERT(false); return 0; } /******************************************************************************* * Pairs of functions for decoding and encoding a generalised type of operand. 
*/ /* adr_page: used for adr, adrp */ static bool decode_opnd_adr_page(int scale, uint enc, byte *pc, OUT opnd_t *opnd) { uint bits = (enc >> 3 & 0x1ffffc) | (enc >> 29 & 3); byte *addr = ((byte *)((ptr_uint_t)pc >> scale << scale) + extract_int(bits, 0, 21) * ((ptr_int_t)1 << scale)); *opnd = opnd_create_rel_addr(addr, OPSZ_0); return true; } static bool encode_opnd_adr_page(int scale, byte *pc, opnd_t opnd, OUT uint *enc_out) { void *addr; uint bits; if (!opnd_is_rel_addr(opnd)) return false; addr = opnd_get_addr(opnd); if (!try_encode_int(&bits, 21, scale, (ptr_int_t)addr - (ptr_int_t)((ptr_uint_t)pc >> scale << scale))) return false; *enc_out = (bits & 3) << 29 | (bits & 0x1ffffc) << 3; return true; } /* dq_plus: used for dq0, dq0p1, dq0p2, dq0p3 */ static inline bool decode_opnd_dq_plus(int add, int qpos, uint enc, OUT opnd_t *opnd) { *opnd = opnd_create_reg((TEST(1U << qpos, enc) ? DR_REG_Q0 : DR_REG_D0) + (extract_uint(enc, 0, 5) + add) % 32); return true; } static inline bool encode_opnd_dq_plus(int add, int qpos, opnd_t opnd, OUT uint *enc_out) { uint num; bool q; if (!opnd_is_reg(opnd)) return false; q = (uint)(opnd_get_reg(opnd) - DR_REG_Q0) < 32; num = opnd_get_reg(opnd) - (q ? DR_REG_Q0 : DR_REG_D0); if (num >= 32) return false; *enc_out = (num - add) % 32 | (uint)q << qpos; return true; } /* index: used for opnd_index0, ..., opnd_index3 */ static bool decode_opnd_index(int n, uint enc, OUT opnd_t *opnd) { uint bits = (enc >> 30 & 1) << 3 | (enc >> 10 & 7); *opnd = opnd_create_immed_int(bits >> n, OPSZ_4b); return true; } static bool encode_opnd_index(int n, opnd_t opnd, OUT uint *enc_out) { ptr_int_t val; uint bits; if (!opnd_is_immed_int(opnd)) return false; val = opnd_get_immed_int(opnd); if (val < 0 || val >= 16 >> n) return false; bits = val << n; *enc_out = (bits >> 3 & 1) << 30 | (bits & 7) << 10; return true; } /* int: used for almost every operand type that is an immediate integer */ static bool decode_opnd_int(int pos, int len, bool signd, int scale, opnd_size_t size, dr_opnd_flags_t flags, uint enc, OUT opnd_t *opnd) { ptr_int_t val = signd ? extract_int(enc, pos, len) : extract_uint(enc, pos, len); *opnd = opnd_add_flags(opnd_create_immed_int(val * ((ptr_int_t)1 << scale), size), flags); return true; } static bool encode_opnd_int(int pos, int len, bool signd, int scale, dr_opnd_flags_t flags, opnd_t opnd, OUT uint *enc_out) { ptr_uint_t val; if (!opnd_is_immed_int(opnd) || (opnd_get_flags(opnd) & flags) != flags) return false; val = opnd_get_immed_int(opnd); if ((val & (((ptr_uint_t)1 << scale) - 1)) != 0) return false; if ((val + (signd ? 
((ptr_uint_t)1 << (len + scale - 1)) : 0)) >> (len + scale) != 0) return false; *enc_out = (val >> scale & (((ptr_uint_t)1 << (len - 1)) * 2 - 1)) << pos; return true; } /* imm_bf: used for bitfield immediate operands */ static bool decode_opnd_imm_bf(int pos, uint enc, OUT opnd_t *opnd) { if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32) return false; return decode_opnd_int(pos, 6, false, 0, OPSZ_6b, 0, enc, opnd); } static bool encode_opnd_imm_bf(int pos, uint enc, opnd_t opnd, uint *enc_out) { if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32) return false; return encode_opnd_int(pos, 6, false, 0, 0, opnd, enc_out); } /* mem0_scale: used for mem0, mem0p */ static inline bool decode_opnd_mem0_scale(int scale, uint enc, OUT opnd_t *opnd) { *opnd = create_base_imm(enc, 0, 1 << scale); return true; } static inline bool encode_opnd_mem0_scale(int scale, opnd_t opnd, OUT uint *enc_out) { uint xn; if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(1 << scale) || opnd_get_disp(opnd) != 0) return false; *enc_out = xn << 5; return true; } /* mem12_scale: used for mem12, mem12q, prf12 */ static inline bool decode_opnd_mem12_scale(int scale, bool prfm, uint enc, OUT opnd_t *opnd) { *opnd = create_base_imm(enc, extract_uint(enc, 10, 12) << scale, prfm ? 0 : 1 << scale); return true; } static inline bool encode_opnd_mem12_scale(int scale, bool prfm, opnd_t opnd, OUT uint *enc_out) { int disp; uint xn; if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != (prfm ? OPSZ_0 : opnd_size_from_bytes(1 << scale))) return false; disp = opnd_get_disp(opnd); if (disp < 0 || disp >> scale > 0xfff || disp >> scale << scale != disp) return false; *enc_out = xn << 5 | (uint)disp >> scale << 10; return true; } /* mem7_postindex: used for mem7, mem7post */ static inline bool decode_opnd_mem7_postindex(bool post, uint enc, OUT opnd_t *opnd) { int scale = mem7_scale(enc); *opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 15, 7) * (1 << scale), 2 << scale); opnd->value.base_disp.pre_index = !post; return true; } static inline bool encode_opnd_mem7_postindex(bool post, uint enc, opnd_t opnd, OUT uint *enc_out) { int scale = mem7_scale(enc); int disp; uint xn; if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(2 << scale)) return false; disp = opnd_get_disp(opnd); if (disp == 0 && opnd.value.base_disp.pre_index == post) return false; if (post ? disp != 0 : ((uint)disp & ((1 << scale) - 1)) != 0 || (uint)disp + (0x40 << scale) >= (0x80 << scale)) return false; *enc_out = xn << 5 | ((uint)disp >> scale & 0x7f) << 15; return true; } /* mem9_bytes: used for mem9, mem9post, mem9q, mem9qpost, prf9 */ static inline bool decode_opnd_mem9_bytes(int bytes, bool post, uint enc, OUT opnd_t *opnd) { *opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 12, 9), bytes); opnd->value.base_disp.pre_index = !post; return true; } static inline bool encode_opnd_mem9_bytes(int bytes, bool post, opnd_t opnd, OUT uint *enc_out) { int disp; uint xn; if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(bytes)) return false; disp = opnd_get_disp(opnd); if (disp == 0 && opnd.value.base_disp.pre_index == post) return false; if (post ? 
(disp != 0) : (disp < -256 || disp > 255)) return false; *enc_out = xn << 5 | ((uint)disp & 0x1ff) << 12; return true; } /* memreg_size: used for memreg, memregq, prfreg */ static inline bool decode_opnd_memreg_size(opnd_size_t size, uint enc, OUT opnd_t *opnd) { if (!TEST(1U << 14, enc)) return false; *opnd = opnd_create_base_disp_aarch64(decode_reg(enc >> 5 & 31, true, true), decode_reg(enc >> 16 & 31, true, false), enc >> 13 & 7, TEST(1U << 12, enc), 0, 0, size); return true; } static inline bool encode_opnd_memreg_size(opnd_size_t size, opnd_t opnd, OUT uint *enc_out) { uint rn, rm, option; bool xn, xm, scaled; if (!opnd_is_base_disp(opnd) || opnd_get_size(opnd) != size || opnd_get_disp(opnd) != 0) return false; option = opnd_get_index_extend(opnd, &scaled, NULL); if (!TEST(2, option)) return false; if (!encode_reg(&rn, &xn, opnd_get_base(opnd), true) || !xn || !encode_reg(&rm, &xm, opnd_get_index(opnd), false) || !xm) return false; *enc_out = rn << 5 | rm << 16 | option << 13 | (uint)scaled << 12; return true; } /* q0p: used for q0p1, q0p2, q0p3 */ static bool decode_opnd_q0p(int add, uint enc, OUT opnd_t *opnd) { *opnd = decode_vreg(4, (extract_uint(enc, 0, 5) + add) % 32); return true; } static bool encode_opnd_q0p(int add, opnd_t opnd, OUT uint *enc_out) { opnd_size_t size = OPSZ_NA; uint r; if (!encode_vreg(&size, &r, opnd) || size != OPSZ_16) return false; *enc_out = (r - add) % 32; return true; } /* rn: used for many integer register operands where bit 31 specifies W or X */ static inline bool decode_opnd_rn(bool is_sp, int pos, uint enc, OUT opnd_t *opnd) { *opnd = opnd_create_reg(decode_reg(extract_uint(enc, pos, 5), TEST(1U << 31, enc), is_sp)); return true; } static inline bool encode_opnd_rn(bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out) { uint num; bool is_x; if (!opnd_is_reg(opnd) || !encode_reg(&num, &is_x, opnd_get_reg(opnd), is_sp)) return false; *enc_out = (uint)is_x << 31 | num << pos; return true; } /* vector_reg: used for many FP/SIMD register operands */ static bool decode_opnd_vector_reg(int pos, int scale, uint enc, OUT opnd_t *opnd) { *opnd = decode_vreg(scale, extract_uint(enc, pos, 5)); return true; } static bool encode_opnd_vector_reg(int pos, int scale, opnd_t opnd, OUT uint *enc_out) { opnd_size_t size = OPSZ_NA; uint r; if (!encode_vreg(&size, &r, opnd) || size != opnd_size_from_bytes(1 << scale)) return false; *enc_out = r << pos; return true; } /* vtn: used for vt0, ..., vt3 */ static bool decode_opnd_vtn(int add, uint enc, OUT opnd_t *opnd) { if (extract_uint(enc, 10, 2) == 3 && extract_uint(enc, 30, 1) == 0) return false; *opnd = opnd_create_reg((TEST(1U << 30, enc) ? DR_REG_Q0 : DR_REG_D0) + ((extract_uint(enc, 0, 5) + add) % 32)); return true; } static bool encode_opnd_vtn(int add, uint enc, opnd_t opnd, OUT uint *enc_out) { reg_t reg; uint num; bool q; if (!opnd_is_reg(opnd)) return false; reg = opnd_get_reg(opnd); q = (uint)(reg - DR_REG_Q0) < 32; if (extract_uint(enc, 10, 2) == 3 && !q) return false; num = reg - (q ? 
DR_REG_Q0 : DR_REG_D0); if (num >= 32) return false; *enc_out = (num - add) % 32 | (uint)q << 30; return true; } /* wxn: used for many integer register operands with fixed size (W or X) */ static bool decode_opnd_wxn(bool is_x, bool is_sp, int pos, uint enc, OUT opnd_t *opnd) { *opnd = opnd_create_reg(decode_reg(enc >> pos & 31, is_x, is_sp)); return true; } static bool encode_opnd_wxn(bool is_x, bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out) { reg_id_t reg; uint n; if (!opnd_is_reg(opnd)) return false; reg = opnd_get_reg(opnd); n = reg - (is_x ? DR_REG_X0 : DR_REG_W0); if (n < 31) { *enc_out = n << pos; return true; } if (reg == (is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP) : (is_x ? DR_REG_XZR : DR_REG_WZR))) { *enc_out = (uint)31 << pos; return true; } return false; } /* wxnp: used for CASP, even/odd register pairs */ static bool decode_opnd_wxnp(bool is_x, int plus, int pos, uint enc, OUT opnd_t *opnd) { if ((enc >> pos & 1) != 0) return false; *opnd = opnd_create_reg(decode_reg(((enc >> pos) + plus) & 31, is_x, false)); return true; } static bool encode_opnd_wxnp(bool is_x, int plus, int pos, opnd_t opnd, OUT uint *enc_out) { reg_id_t reg; uint n; if (!opnd_is_reg(opnd)) return false; reg = opnd_get_reg(opnd); n = reg - (is_x ? DR_REG_X0 : DR_REG_W0); if (n < 31 && (n - plus) % 2 == 0) { *enc_out = ((n - plus) & 31) << pos; return true; } if (reg == (is_x ? DR_REG_XZR : DR_REG_WZR) && ((uint)31 - plus) % 2 == 0) { *enc_out = (((uint)31 - plus) & 31) << pos; return true; } return false; } /******************************************************************************* * Pairs of functions for decoding and encoding each type of operand, as listed in * "codec.txt". Try to keep these short: perhaps a tail call to a function in the * previous section. 
*/ /* adr: operand of ADR */ static inline bool decode_opnd_adr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_adr_page(0, enc, pc, opnd); } static inline bool encode_opnd_adr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_adr_page(0, pc, opnd, enc_out); } /* adrp: operand of ADRP */ static inline bool decode_opnd_adrp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_adr_page(12, enc, pc, opnd); } static inline bool encode_opnd_adrp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_adr_page(12, pc, opnd, enc_out); } /* b0: B register at bit position 0 */ static inline bool decode_opnd_b0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(0, 0, enc, opnd); } static inline bool encode_opnd_b0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(0, 0, opnd, enc_out); } /* cond: condition operand for conditional compare */ static inline bool decode_opnd_cond(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(12, 4, false, 0, OPSZ_4b, DR_OPND_IS_CONDITION, enc, opnd); } static inline bool encode_opnd_cond(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(12, 4, false, 0, 0, opnd, enc_out); } /* d0: D register at bit position 0 */ static inline bool decode_opnd_d0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(0, 3, enc, opnd); } static inline bool encode_opnd_d0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(0, 3, opnd, enc_out); } /* d10: D register at bit position 10 */ static inline bool decode_opnd_d10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(10, 3, enc, opnd); } static inline bool encode_opnd_d10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(10, 3, opnd, enc_out); } /* dq0: D/Q register at bit position 0; bit 30 selects Q reg */ static inline bool decode_opnd_dq0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_dq_plus(0, 30, enc, opnd); } static inline bool encode_opnd_dq0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_dq_plus(0, 30, opnd, enc_out); } /* dq0p1: as dq0 but add 1 mod 32 to reg number */ static inline bool decode_opnd_dq0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_dq_plus(1, 30, enc, opnd); } static inline bool encode_opnd_dq0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_dq_plus(1, 30, opnd, enc_out); } /* dq0p2: as dq0 but add 2 mod 32 to reg number */ static inline bool decode_opnd_dq0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_dq_plus(2, 30, enc, opnd); } static inline bool encode_opnd_dq0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_dq_plus(2, 30, opnd, enc_out); } /* dq0p3: as dq0 but add 3 mod 32 to reg number */ static inline bool decode_opnd_dq0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_dq_plus(3, 30, enc, opnd); } static inline bool encode_opnd_dq0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_dq_plus(3, 30, opnd, enc_out); } /* ext: extend type, dr_extend_type_t */ static inline bool decode_opnd_ext(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return 
decode_opnd_int(13, 3, false, 0, OPSZ_3b, DR_OPND_IS_EXTEND, enc, opnd); } static inline bool encode_opnd_ext(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(13, 3, false, 0, DR_OPND_IS_EXTEND, opnd, enc_out); } /* extam: extend amount, a left shift from 0 to 4 */ static inline bool decode_opnd_extam(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { if (extract_uint(enc, 10, 3) > 4) /* shift amount must be <= 4 */ return false; return decode_opnd_int(10, 3, false, 0, OPSZ_3b, 0, enc, opnd); } static inline bool encode_opnd_extam(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { uint t; if (!encode_opnd_int(10, 3, false, 0, 0, opnd, &t) || extract_uint(t, 10, 3) > 4) /* shift amount must be <= 4 */ return false; *enc_out = t; return true; } /* h0: H register at bit position 0 */ static inline bool decode_opnd_h0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(0, 1, enc, opnd); } static inline bool encode_opnd_h0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(0, 1, opnd, enc_out); } /* ign10: ignored register field at bit position 10 in load/store exclusive */ static inline bool decode_opnd_ign10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(10, 5, false, 0, OPSZ_5b, 0, enc, opnd); } static inline bool encode_opnd_ign10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(10, 5, false, 0, 0, opnd, enc_out); } /* ign10: ignored register field at bit position 16 in load/store exclusive */ static inline bool decode_opnd_ign16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(16, 5, false, 0, OPSZ_5b, 0, enc, opnd); } static inline bool encode_opnd_ign16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out); } /* imm12: 12-bit immediate operand of ADD/SUB */ static inline bool decode_opnd_imm12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(10, 12, false, 0, OPSZ_12b, 0, enc, opnd); } static inline bool encode_opnd_imm12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(10, 12, false, 0, 0, opnd, enc_out); } /* imm12sh: shift amount for 12-bit immediate of ADD/SUB, 0 or 16 */ static inline bool decode_opnd_imm12sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(22, 1, false, 4, OPSZ_5b, 0, enc, opnd); } static inline bool encode_opnd_imm12sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(22, 1, false, 4, 0, opnd, enc_out); } /* imm16: 16-bit immediate operand of MOVK/MOVN/MOVZ/SVC */ static inline bool decode_opnd_imm16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(5, 16, false, 0, OPSZ_12b, 0, enc, opnd); } static inline bool encode_opnd_imm16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(5, 16, false, 0, 0, opnd, enc_out); } /* imm16sh: shift amount for 16-bit immediate of MOVK/MOVN/MOVZ/SVC */ static inline bool decode_opnd_imm16sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { if (!TEST(1U << 31, enc) && TEST(1U << 22, enc)) return false; return decode_opnd_int(21, 2, false, 4, OPSZ_6b, 0, enc, opnd); } static inline bool encode_opnd_imm16sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { uint t; if (!encode_opnd_int(21, 2, false, 4, 0, opnd, &t) || (!TEST(1U << 31, 
enc) && TEST(1U << 22, t))) return false; *enc_out = t; return true; } /* imm4: immediate operand for some system instructions */ static inline bool decode_opnd_imm4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(8, 4, false, 0, OPSZ_4b, 0, enc, opnd); } static inline bool encode_opnd_imm4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(8, 4, false, 0, 0, opnd, enc_out); } /* imm5: immediate operand for conditional compare (immediate) */ static inline bool decode_opnd_imm5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(16, 5, false, 0, OPSZ_6b, 0, enc, opnd); } static inline bool encode_opnd_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out); } /* imm6: shift amount for logical and arithmetical instructions */ static inline bool decode_opnd_imm6(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(10, 6, false, 0, OPSZ_6b, 0, enc, opnd); } static inline bool encode_opnd_imm6(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(10, 6, false, 0, 0, opnd, enc_out); } /* immr: first immediate operand for bitfield operation */ static inline bool decode_opnd_immr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_imm_bf(16, enc, opnd); } static inline bool encode_opnd_immr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_imm_bf(16, enc, opnd, enc_out); } /* imms: second immediate operand for bitfield operation */ static inline bool decode_opnd_imms(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_imm_bf(10, enc, opnd); } static inline bool encode_opnd_imms(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_imm_bf(10, enc, opnd, enc_out); } /* index0: index of B subreg in Q register: 0-15 */ static inline bool decode_opnd_index0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_index(0, enc, opnd); } static inline bool encode_opnd_index0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_index(0, opnd, enc_out); } /* index1: index of H subreg in Q register: 0-7 */ static inline bool decode_opnd_index1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_index(1, enc, opnd); } static inline bool encode_opnd_index1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_index(1, opnd, enc_out); } /* index2: index of S subreg in Q register: 0-3 */ static inline bool decode_opnd_index2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_index(2, enc, opnd); } static inline bool encode_opnd_index2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_index(2, opnd, enc_out); } /* index3: index of D subreg in Q register: 0-1 */ static inline bool decode_opnd_index3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_index(3, enc, opnd); } static inline bool encode_opnd_index3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_index(3, opnd, enc_out); } /* lsl: constant LSL for ADD/MOV, no encoding bits */ static inline bool decode_opnd_lsl(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { uint t = DR_SHIFT_LSL; return decode_opnd_int(0, 2, false, 0, OPSZ_2b, DR_OPND_IS_SHIFT, t, opnd); } static inline bool encode_opnd_lsl(uint enc, int opcode, byte *pc, opnd_t 
opnd, OUT uint *enc_out) { uint t; if (!encode_opnd_int(0, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) || t != DR_SHIFT_LSL) return false; *enc_out = 0; return true; } /* mem0: memory operand with no offset, gets size from bits 30 and 31 */ static inline bool decode_opnd_mem0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem0_scale(extract_uint(enc, 30, 2), enc, opnd); } static inline bool encode_opnd_mem0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem0_scale(extract_uint(enc, 30, 2), opnd, enc_out); } /* mem0p: as mem0, but a pair of registers, so double size */ static inline bool decode_opnd_mem0p(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, enc, opnd); } static inline bool encode_opnd_mem0p(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, opnd, enc_out); } /* mem12: memory operand with 12-bit offset; gets size from bits 30 and 31 */ static inline bool decode_opnd_mem12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, enc, opnd); } static inline bool encode_opnd_mem12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, opnd, enc_out); } /* mem12: memory operand with 12-bit offset; size is 16 bytes */ static inline bool decode_opnd_mem12q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem12_scale(4, false, enc, opnd); } static inline bool encode_opnd_mem12q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem12_scale(4, false, opnd, enc_out); } /* mem7: memory operand with 7-bit offset; gets size from bits 26, 30 and 31 */ static inline bool decode_opnd_mem7(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem7_postindex(false, enc, opnd); } static inline bool encode_opnd_mem7(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem7_postindex(false, enc, opnd, enc_out); } /* mem7off: just the 7-bit offset from mem7 */ static inline bool decode_opnd_mem7off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(15, 7, true, mem7_scale(enc), OPSZ_PTR, 0, enc, opnd); } static inline bool encode_opnd_mem7off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(15, 7, true, mem7_scale(enc), 0, opnd, enc_out); } /* mem7off: post-indexed mem7, so offset is zero */ static inline bool decode_opnd_mem7post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem7_postindex(true, enc, opnd); } static inline bool encode_opnd_mem7post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem7_postindex(true, enc, opnd, enc_out); } /* mem9: memory operand with 9-bit offset; gets size from bits 30 and 31 */ static inline bool decode_opnd_mem9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, enc, opnd); } static inline bool encode_opnd_mem9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, opnd, enc_out); } /* mem9off: just the 9-bit offset from mem9 */ static inline bool decode_opnd_mem9off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(12, 9, true, 0, OPSZ_PTR, 
0, enc, opnd); } static inline bool encode_opnd_mem9off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(12, 9, true, 0, 0, opnd, enc_out); } /* mem9post: post-indexed mem9, so offset is zero */ static inline bool decode_opnd_mem9post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, enc, opnd); } static inline bool encode_opnd_mem9post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, opnd, enc_out); } /* mem9q: memory operand with 9-bit offset; size is 16 bytes */ static inline bool decode_opnd_mem9q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem9_bytes(16, false, enc, opnd); } static inline bool encode_opnd_mem9q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem9_bytes(16, false, opnd, enc_out); } /* mem9qpost: post-indexed mem9q, so offset is zero */ static inline bool decode_opnd_mem9qpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem9_bytes(16, true, enc, opnd); } static inline bool encode_opnd_mem9qpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem9_bytes(16, true, opnd, enc_out); } /* memlit: memory operand for literal load; gets size from bits 26, 30 and 31 */ static inline bool decode_opnd_memlit(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { *opnd = opnd_create_rel_addr(pc + 4 * extract_int(enc, 5, 19), memlit_size(enc)); return true; } static inline bool encode_opnd_memlit(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { ptr_uint_t off; if (!opnd_is_rel_addr(opnd) || opnd_get_size(opnd) != memlit_size(enc)) return false; off = (byte *)opnd_get_addr(opnd) - pc; if ((off & 3) != 0 || off + (1U << 20) >= 1U << 21) return false; *enc_out = (off >> 2 & 0x7ffff) << 5; return true; } /* memreg: memory operand with register offset; gets size from bits 30 and 31 */ static inline bool decode_opnd_memreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)), enc, opnd); } static inline bool encode_opnd_memreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)), opnd, enc_out); } /* memregq: memory operand with register offset; size is 16 bytes */ static inline bool decode_opnd_memregq(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_memreg_size(OPSZ_16, enc, opnd); } static inline bool encode_opnd_memregq(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_memreg_size(OPSZ_16, opnd, enc_out); } /* memvm: memory operand for SIMD load/store multiple structures */ static inline bool decode_opnd_memvm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc); *opnd = create_base_imm(enc, 0, bytes); return true; } static inline bool encode_opnd_memvm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { int regs = multistruct_regcount(enc); opnd_size_t size; uint rn; if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0) return false; size = opnd_get_size(opnd); if (size != opnd_size_from_bytes(regs * 8) && size != opnd_size_from_bytes(regs * 16)) return false; *enc_out = rn << 5 | (uint)(size == opnd_size_from_bytes(regs * 16)) << 30; 
return true; } /* memvr: memory operand for SIMD load structure and replicate */ static inline bool decode_opnd_memvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2); *opnd = create_base_imm(enc, 0, bytes); return true; } static inline bool encode_opnd_memvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { int bytes, regcount; uint rn; if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0) return false; bytes = opnd_size_in_bytes(opnd_get_size(opnd)); regcount = memvr_regcount(enc); if (bytes % regcount != 0) return false; bytes /= regcount; if (bytes < 1 || bytes > 8 || (bytes & (bytes - 1)) != 0 || opnd_size_from_bytes(bytes * regcount) != opnd_get_size(opnd)) return false; *enc_out = (rn << 5 | (bytes == 1 ? 0 : bytes == 2 ? 1 : bytes == 4 ? 2 : 3) << 10); return true; } /* memvrpost: post-indexed memvr */ static inline bool decode_opnd_memvrpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2); *opnd = create_base_imm(enc, 0, bytes); return true; } static inline bool encode_opnd_memvrpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2); uint rn; if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0 || opnd_get_size(opnd) != opnd_size_from_bytes(bytes)) return false; *enc_out = rn << 5; return true; } /* memvs: memory operand for SIMD load/store single structure */ static inline bool decode_opnd_memvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int bytes = memvs_size(enc); *opnd = create_base_imm(enc, 0, bytes); return true; } static inline bool encode_opnd_memvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { uint rn; if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0) return false; if (opnd_get_size(opnd) != opnd_size_from_bytes(memvs_size(enc))) return false; *enc_out = rn << 5; return true; } /* nzcv: flag bit specifier for conditional compare */ static inline bool decode_opnd_nzcv(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(0, 4, false, 0, OPSZ_4b, 0, enc, opnd); } static inline bool encode_opnd_nzcv(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(0, 4, false, 0, 0, opnd, enc_out); } /* prf12: prefetch variant of mem12 */ static inline bool decode_opnd_prf12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem12_scale(3, true, enc, opnd); } static inline bool encode_opnd_prf12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem12_scale(3, true, opnd, enc_out); } /* prf9: prefetch variant of mem9 */ static inline bool decode_opnd_prf9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_mem9_bytes(0, false, enc, opnd); } static inline bool encode_opnd_prf9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_mem9_bytes(0, false, opnd, enc_out); } /* prfop: prefetch operation, such as PLDL1KEEP */ static inline bool decode_opnd_prfop(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(0, 5, false, 0, OPSZ_5b, 0, enc, opnd); } static inline bool encode_opnd_prfop(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(0, 5, false, 0, 0, opnd, enc_out); } /* prfreg: prefetch variant of memreg */ static inline bool decode_opnd_prfreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return 
decode_opnd_memreg_size(OPSZ_0, enc, opnd); } static inline bool encode_opnd_prfreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_memreg_size(OPSZ_0, opnd, enc_out); } /* q0: Q register at bit position 0 */ static inline bool decode_opnd_q0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(0, 4, enc, opnd); } static inline bool encode_opnd_q0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(0, 4, opnd, enc_out); } /* q0p1: as q0 but add 1 mod 32 to reg number */ static inline bool decode_opnd_q0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_q0p(1, enc, opnd); } static inline bool encode_opnd_q0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_q0p(1, opnd, enc_out); } /* q0p2: as q0 but add 2 mod 32 to reg number */ static inline bool decode_opnd_q0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_q0p(2, enc, opnd); } static inline bool encode_opnd_q0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_q0p(2, opnd, enc_out); } /* q0p3: as q0 but add 3 mod 32 to reg number */ static inline bool decode_opnd_q0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_q0p(3, enc, opnd); } static inline bool encode_opnd_q0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_q0p(3, opnd, enc_out); } /* q10: Q register at bit position 10 */ static inline bool decode_opnd_q10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(10, 4, enc, opnd); } static inline bool encode_opnd_q10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(10, 4, opnd, enc_out); } /* s0: S register at bit position 0 */ static inline bool decode_opnd_s0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(0, 2, enc, opnd); } static inline bool encode_opnd_s0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(0, 2, opnd, enc_out); } /* s10: S register at bit position 10 */ static inline bool decode_opnd_s10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vector_reg(10, 2, enc, opnd); } static inline bool encode_opnd_s10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vector_reg(10, 2, opnd, enc_out); } /* shift3: shift type for ADD/SUB: LSL, LSR or ASR */ static inline bool decode_opnd_shift3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { if (extract_uint(enc, 22, 2) == 3) return false; return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd); } static inline bool encode_opnd_shift3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { uint t; if (!encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) || extract_uint(t, 22, 2) == 3) return false; *enc_out = t; return true; } /* shift4: shift type for logical operation: LSL, LSR, ASR or ROR */ static inline bool decode_opnd_shift4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd); } static inline bool encode_opnd_shift4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, enc_out); } /* sysops: immediate operand for SYS instruction */ static inline bool decode_opnd_sysops(uint enc, int 
opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(5, 14, false, 0, OPSZ_2, 0, enc, opnd); } static inline bool encode_opnd_sysops(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(5, 14, false, 0, 0, opnd, enc_out); } /* sysreg: system register, operand of MRS/MSR */ static inline bool decode_opnd_sysreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { *opnd = decode_sysreg(extract_uint(enc, 5, 15)); return true; } static inline bool encode_opnd_sysreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { uint t; if (!encode_sysreg(&t, opnd)) return false; *enc_out = t << 5; return true; } /* vmsz: B/H/S/D for load/store multiple structures */ static inline bool decode_opnd_vmsz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_int(10, 2, false, 0, OPSZ_2b, 0, enc, opnd); } static inline bool encode_opnd_vmsz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_int(10, 2, false, 0, 0, opnd, enc_out); } /* vt0: first register operand of SIMD load/store multiple structures */ static inline bool decode_opnd_vt0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vtn(0, enc, opnd); } static inline bool encode_opnd_vt0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vtn(0, enc, opnd, enc_out); } /* vt1: second register operand of SIMD load/store multiple structures */ static inline bool decode_opnd_vt1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vtn(1, enc, opnd); } static inline bool encode_opnd_vt1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vtn(1, enc, opnd, enc_out); } /* vt2: third register operand of SIMD load/store multiple structures */ static inline bool decode_opnd_vt2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vtn(2, enc, opnd); } static inline bool encode_opnd_vt2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vtn(2, enc, opnd, enc_out); } /* vt3: fourth register operand of SIMD load/store multiple structures */ static inline bool decode_opnd_vt3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_vtn(3, enc, opnd); } static inline bool encode_opnd_vt3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_vtn(3, enc, opnd, enc_out); } /* w0: W register or WZR at bit position 0 */ static inline bool decode_opnd_w0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(false, false, 0, enc, opnd); } static inline bool encode_opnd_w0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(false, false, 0, opnd, enc_out); } /* w0p0: even-numbered W register or WZR at bit position 0 */ static inline bool decode_opnd_w0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(false, 0, 0, enc, opnd); } static inline bool encode_opnd_w0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(false, 0, 0, opnd, enc_out); } /* w0p1: even-numbered W register or WZR at bit position 0, add 1 */ static inline bool decode_opnd_w0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(false, 1, 0, enc, opnd); } static inline bool encode_opnd_w0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(false, 1, 0, opnd, enc_out); } /* w10: W register or WZR at bit position 10 */ 
static inline bool decode_opnd_w10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(false, false, 10, enc, opnd); } static inline bool encode_opnd_w10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(false, false, 10, opnd, enc_out); } /* w16: W register or WZR at bit position 16 */ static inline bool decode_opnd_w16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(false, false, 16, enc, opnd); } static inline bool encode_opnd_w16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(false, false, 16, opnd, enc_out); } /* w16p0: even-numbered W register or WZR at bit position 16 */ static inline bool decode_opnd_w16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(false, 0, 16, enc, opnd); } static inline bool encode_opnd_w16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(false, 0, 16, opnd, enc_out); } /* w16p1: even-numbered W register or WZR at bit position 16, add 1 */ static inline bool decode_opnd_w16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(false, 1, 16, enc, opnd); } static inline bool encode_opnd_w16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(false, 1, 16, opnd, enc_out); } /* w5: W register or WZR at bit position 5 */ static inline bool decode_opnd_w5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(false, false, 5, enc, opnd); } static inline bool encode_opnd_w5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(false, false, 5, opnd, enc_out); } /* wx0: W/X register or WZR/XZR at bit position 0; bit 31 selects X reg */ static inline bool decode_opnd_wx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(false, 0, enc, opnd); } static inline bool encode_opnd_wx0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(false, 0, opnd, enc_out); } /* wx0sp: W/X register or WSP/XSP at bit position 0; bit 31 selects X reg */ static inline bool decode_opnd_wx0sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(true, 0, enc, opnd); } static inline bool encode_opnd_wx0sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(true, 0, opnd, enc_out); } /* wx10: W/X register or WZR/XZR at bit position 10; bit 31 selects X reg */ static inline bool decode_opnd_wx10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(false, 10, enc, opnd); } static inline bool encode_opnd_wx10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(false, 10, opnd, enc_out); } /* wx16: W/X register or WZR/XZR at bit position 16; bit 31 selects X reg */ static inline bool decode_opnd_wx16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(false, 16, enc, opnd); } static inline bool encode_opnd_wx16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(false, 16, opnd, enc_out); } /* wx5: W/X register or WZR/XZR at bit position 5; bit 31 selects X reg */ static inline bool decode_opnd_wx5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(false, 5, enc, opnd); } static inline bool encode_opnd_wx5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(false, 5, opnd, enc_out); } /* wx5sp: 
W/X register or WSP/XSP at bit position 5; bit 31 selects X reg */ static inline bool decode_opnd_wx5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_rn(true, 5, enc, opnd); } static inline bool encode_opnd_wx5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_rn(true, 5, opnd, enc_out); } /* x0: X register or XZR at bit position 0 */ static inline bool decode_opnd_x0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(true, false, 0, enc, opnd); } static inline bool encode_opnd_x0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(true, false, 0, opnd, enc_out); } /* x0p0: even-numbered X register or XZR at bit position 0 */ static inline bool decode_opnd_x0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(true, 0, 0, enc, opnd); } static inline bool encode_opnd_x0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(true, 0, 0, opnd, enc_out); } /* x0p1: even-numbered X register or XZR at bit position 0, add 1 */ static inline bool decode_opnd_x0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(true, 1, 0, enc, opnd); } static inline bool encode_opnd_x0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(true, 1, 0, opnd, enc_out); } /* x10: X register or XZR at bit position 10 */ static inline bool decode_opnd_x10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(true, false, 10, enc, opnd); } static inline bool encode_opnd_x10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(true, false, 10, opnd, enc_out); } /* x16: X register or XZR at bit position 16 */ static inline bool decode_opnd_x16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(true, false, 16, enc, opnd); } static inline bool encode_opnd_x16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(true, false, 16, opnd, enc_out); } /* x16p0: even-numbered X register or XZR at bit position 16 */ static inline bool decode_opnd_x16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(true, 0, 16, enc, opnd); } static inline bool encode_opnd_x16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(true, 0, 16, opnd, enc_out); } /* x16p1: even-numbered X register or XZR at bit position 16, add 1 */ static inline bool decode_opnd_x16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxnp(true, 1, 16, enc, opnd); } static inline bool encode_opnd_x16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxnp(true, 1, 16, opnd, enc_out); } /* x16imm: immediate operand for SIMD load/store multiple structures (post-indexed) */ static inline bool decode_opnd_x16imm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int num = extract_uint(enc, 16, 5); if (num < 31) *opnd = opnd_create_reg(DR_REG_X0 + num); else { int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc); *opnd = opnd_create_immed_int(bytes, OPSZ_1); } return true; } static inline bool encode_opnd_x16imm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { if (opnd_is_reg(opnd)) { uint num = opnd_get_reg(opnd) - DR_REG_X0; if (num == 31) return false; *enc_out = num << 16; return true; } else if (opnd_is_immed_int(opnd)) { ptr_int_t bytes = 
opnd_get_immed_int(opnd); int regs = multistruct_regcount(enc); if (bytes != regs * 8 && bytes != regs * 16) return false; *enc_out = 31U << 16 | (uint)(bytes == regs * 16) << 30; return true; } return false; } /* x16immvr: immediate operand for SIMD load structure and replicate (post-indexed) */ static inline bool decode_opnd_x16immvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int num = extract_uint(enc, 16, 5); if (num < 31) *opnd = opnd_create_reg(DR_REG_X0 + num); else { int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2); *opnd = opnd_create_immed_int(bytes, OPSZ_1); } return true; } static inline bool encode_opnd_x16immvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { if (opnd_is_reg(opnd)) { uint num = opnd_get_reg(opnd) - DR_REG_X0; if (num == 31) return false; *enc_out = num << 16; return true; } else if (opnd_is_immed_int(opnd)) { ptr_int_t bytes = opnd_get_immed_int(opnd); if (bytes != memvr_regcount(enc) << extract_uint(enc, 10, 2)) return false; *enc_out = 31U << 16; return true; } return false; } /* x16immvs: immediate operand for SIMD load/store single structure (post-indexed) */ static inline bool decode_opnd_x16immvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { int num = extract_uint(enc, 16, 5); if (num < 31) *opnd = opnd_create_reg(DR_REG_X0 + num); else { int bytes = memvs_size(enc); *opnd = opnd_create_immed_int(bytes, OPSZ_1); } return true; } static inline bool encode_opnd_x16immvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { if (opnd_is_reg(opnd)) { uint num = opnd_get_reg(opnd) - DR_REG_X0; if (num == 31) return false; *enc_out = num << 16; return true; } else if (opnd_is_immed_int(opnd)) { ptr_int_t bytes = opnd_get_immed_int(opnd); if (bytes != memvs_size(enc)) return false; *enc_out = 31U << 16; return true; } return false; } /* x5: X register or XZR at position 5 */ static inline bool decode_opnd_x5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(true, false, 5, enc, opnd); } static inline bool encode_opnd_x5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(true, false, 5, opnd, enc_out); } /* x5sp: X register or XSP at position 5 */ static inline bool decode_opnd_x5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd) { return decode_opnd_wxn(true, true, 5, enc, opnd); } static inline bool encode_opnd_x5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out) { return encode_opnd_wxn(true, true, 5, opnd, enc_out); } /******************************************************************************* * Pairs of functions for decoding and encoding opndsets, as listed in "codec.txt". * Currently all branch instructions are handled in this way. 
*/ /* b: used for B and BL */ static inline bool decode_opnds_b(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, 0, 1); instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 0, 26) * 4)); return true; } static inline uint encode_opnds_b(byte *pc, instr_t *instr, uint enc) { uint off; if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 1 && encode_pc_off(&off, 26, pc, instr, instr_get_src(instr, 0))) return (enc | off); return ENCFAIL; } /* bcond: used for B.cond */ static inline bool decode_opnds_bcond(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, 0, 1); instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4)); instr_set_predicate(instr, DR_PRED_EQ + (enc & 15)); return true; } static inline uint encode_opnds_bcond(byte *pc, instr_t *instr, uint enc) { uint off; if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 1 && encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0)) && (uint)(instr_get_predicate(instr) - DR_PRED_EQ) < 16) return (enc | off << 5 | (instr_get_predicate(instr) - DR_PRED_EQ)); return ENCFAIL; } /* cbz: used for CBNZ and CBZ */ static inline bool decode_opnds_cbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, 0, 2); instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4)); instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), TEST(1U << 31, enc), false))); return true; } static inline uint encode_opnds_cbz(byte *pc, instr_t *instr, uint enc) { uint rt, off; if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 && encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0)) && encode_opnd_rn(false, 0, instr_get_src(instr, 1), &rt)) return (enc | off << 5 | rt); return ENCFAIL; } /* logic_imm: used for AND, ANDS, EOR and ORR. * Logical (immediate) instructions are awkward because there are sometimes * many ways of representing the same immediate value. We add the raw encoding * as an additional operand when the encoding is not the canonical one. */ static inline bool decode_opnds_logic_imm(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { bool is_x = TEST(1U << 31, enc); uint imm_enc = extract_uint(enc, 10, 13); /* encoding of bitmask */ ptr_uint_t imm_val = decode_bitmask(imm_enc); /* value of bitmask */ bool canonical = encode_bitmask(imm_val) == imm_enc; if (imm_val == 0 || (!is_x && TEST(1U << 12, imm_enc))) return false; if (!is_x) imm_val &= 0xffffffff; instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, 1, 2 + (canonical ? 0 : 1)); instr_set_dst(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), is_x, opcode != OP_ands))); instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 5, 5), is_x, false))); instr_set_src(instr, 1, opnd_create_immed_uint(imm_val, is_x ? 
OPSZ_8 : OPSZ_4)); if (!canonical) instr_set_src(instr, 2, opnd_create_immed_uint(imm_enc, OPSZ_2)); return true; } static inline uint encode_opnds_logic_imm(byte *pc, instr_t *instr, uint enc) { int opcode = instr_get_opcode(instr); int srcs = instr_num_srcs(instr); opnd_t opnd_val; ptr_uint_t imm_val; uint rd, rn; if (srcs < 2 || srcs > 3 || instr_num_dsts(instr) != 1) return ENCFAIL; opnd_val = instr_get_src(instr, 1); if (!encode_opnd_rn(opcode != OP_ands, 0, instr_get_dst(instr, 0), &rd) || !encode_opnd_rn(false, 5, instr_get_src(instr, 0), &rn) || TEST(1U << 31, rd ^ rn) || !opnd_is_immed_int(opnd_val)) return ENCFAIL; imm_val = opnd_get_immed_int(opnd_val); if (!TEST(1U << 31, rd)) { if ((imm_val >> 32) != 0) return ENCFAIL; imm_val |= imm_val << 32; } if (srcs == 3) { opnd_t opnd_enc = instr_get_src(instr, 2); ptr_int_t imm_enc; if (!opnd_is_immed_int(opnd_enc)) return ENCFAIL; imm_enc = opnd_get_immed_int(opnd_enc); if (imm_enc < 0 || imm_enc > 0x1fff || decode_bitmask(imm_enc) != imm_val) return ENCFAIL; return (enc | rd | rn | (uint)imm_enc << 10); } else { int imm_enc = encode_bitmask(imm_val); if (imm_enc < 0) return ENCFAIL; return (enc | rd | rn | (uint)imm_enc << 10); } } /* msr: used for MSR. * With MSR the destination register may or may not be one of the system registers * that we recognise. */ static inline bool decode_opnds_msr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { opnd_t opnd = decode_sysreg(extract_uint(enc, 5, 15)); instr_set_opcode(instr, opcode); if (opnd_is_reg(opnd)) { instr_set_num_opnds(dcontext, instr, 1, 1); instr_set_dst(instr, 0, opnd); } else { instr_set_num_opnds(dcontext, instr, 0, 2); instr_set_src(instr, 1, opnd); } instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), true, false))); return true; } static inline uint encode_opnds_msr(byte *pc, instr_t *instr, uint enc) { uint imm15, xt; if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 && opnd_is_reg(instr_get_dst(instr, 0)) && encode_sysreg(&imm15, instr_get_dst(instr, 0)) && encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt)) return (enc | xt | imm15 << 5); if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 && opnd_is_immed_int(instr_get_src(instr, 1)) && encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt) && encode_sysreg(&imm15, instr_get_src(instr, 1))) return (enc | xt | imm15 << 5); return ENCFAIL; } /* tbz: used for TBNZ and TBZ */ static inline bool decode_opnds_tbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode) { instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, 0, 3); instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 14) * 4)); instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), true, false))); instr_set_src(instr, 2, opnd_create_immed_int((enc >> 19 & 31) | (enc >> 26 & 32), OPSZ_5b)); return true; } static inline uint encode_opnds_tbz(byte *pc, instr_t *instr, uint enc) { uint xt, imm6, off; if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 3 && encode_pc_off(&off, 14, pc, instr, instr_get_src(instr, 0)) && encode_opnd_wxn(true, false, 0, instr_get_src(instr, 1), &xt) && encode_opnd_int(0, 6, false, 0, 0, instr_get_src(instr, 2), &imm6)) return (enc | off << 5 | xt | (imm6 & 31) << 19 | (imm6 & 32) << 26); return ENCFAIL; } /******************************************************************************/ /* Include automatically generated decoder and encoder. 
*/ #include "decode_gen.h" #include "encode_gen.h" /******************************************************************************/ byte * decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr) { byte *next_pc = pc + 4; uint enc = *(uint *)pc; CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED, "decode: instr is already decoded, may need to call instr_reset()"); if (!decoder(enc, dcontext, orig_pc, instr)) { /* We use OP_xx for instructions not yet handled by the decoder. * If an A64 instruction accesses a general-purpose register * (except X30) then the number of that register appears in one * of four possible places in the instruction word, so we can * pessimistically assume that an unrecognised instruction reads * and writes all four of those registers, and this is * sufficient to enable correct (though often excessive) mangling. */ instr_set_opcode(instr, OP_xx); instr_set_num_opnds(dcontext, instr, 4, 5); instr->src0 = OPND_CREATE_INT32(enc); instr->srcs[0] = opnd_create_reg(DR_REG_X0 + (enc & 31)); instr->dsts[0] = opnd_create_reg(DR_REG_X0 + (enc & 31)); instr->srcs[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31)); instr->dsts[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31)); instr->srcs[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31)); instr->dsts[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31)); instr->srcs[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31)); instr->dsts[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31)); } instr_set_operands_valid(instr, true); if (orig_pc != pc) { /* We do not want to copy when encoding and condone an invalid * relative target. */ instr_set_raw_bits_valid(instr, false); instr_set_translation(instr, orig_pc); } else { /* We set raw bits AFTER setting all srcs and dsts because setting * a src or dst marks instr as having invalid raw bits. */ ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc)); instr_set_raw_bits(instr, pc, (uint)(next_pc - pc)); } return next_pc; } uint encode_common(byte *pc, instr_t *i) { uint enc; ASSERT(((ptr_int_t)pc & 3) == 0); enc = encoder(pc, i); if (enc != ENCFAIL) return enc; /* We use OP_xx for instructions not yet handled by the decoder. */ if (instr_get_opcode(i) == OP_xx) { ASSERT(instr_num_srcs(i) >= 1 && opnd_is_immed_int(instr_get_src(i, 0))); return opnd_get_immed_int(instr_get_src(i, 0)); } /* We were unable to encode this instruction. */ ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ return enc; }
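The logic_imm comment above explains that a single bitmask immediate can have several raw 13-bit encodings, which is why the non-canonical encoding is carried as an extra source operand. The redundancy comes from the architecture's DecodeBitMasks() pseudocode, which masks immr with `levels`, so immr bits above log2(esize) are ignored. A minimal standalone sketch of one such collision (field layout per the A64 manual; this uses none of the codec's helpers and the names are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* imms == 0b111100 selects element size 2 with run length 1, i.e. the
     * replicated element 0b01, whose 64-bit value is 0x5555555555555555. */
    unsigned imms = 0x3c;
    /* With esize == 2, levels == 0b000001, so only bit 0 of immr matters:
     * the two N:immr:imms encodings below decode to the same bitmask, and
     * an encoder like encode_bitmask() can return only one of them. */
    unsigned canonical = 0u << 12 | 0x00u << 6 | imms; /* N=0, immr=0b000000 */
    unsigned redundant = 0u << 12 | 0x02u << 6 | imms; /* N=0, immr=0b000010 */
    printf("0x%04x and 0x%04x decode to the same value\n", canonical, redundant);
    return 0;
}

Keeping the raw encoding as a third source when it is not canonical means a decode-then-encode round trip reproduces the original instruction bytes instead of silently rewriting them.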
1
11,136
Isn't ENCFAIL used in codec.c below? Wouldn't this make it no longer compile?
DynamoRIO-dynamorio
c
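A note on the review message for this record: in encode_common() above, ENCFAIL is a sentinel return value of encoder() (the `enc != ENCFAIL` check guards the fast path), so if the patch under review dropped its definition, every comparison site in codec.c would stop compiling, which is what the reviewer is probing. A toy sketch of the sentinel pattern, with hypothetical names (toy_encoder is not DynamoRIO code) and assuming the sentinel is a bit pattern the encoder never produces for a real instruction:

#include <stdint.h>
#include <stdio.h>

#define ENCFAIL ((uint32_t)0xffffffff) /* assumed: never a legal encoding here */

static uint32_t toy_encoder(int opcode)
{
    if (opcode == 1)
        return 0xd503201f; /* the A64 NOP encoding */
    return ENCFAIL;        /* unrecognized: caller must test the sentinel */
}

int main(void)
{
    uint32_t enc = toy_encoder(2);
    if (enc == ENCFAIL)
        printf("encoding failed; fall back to raw bits\n");
    return 0;
}

Returning a sentinel instead of a separate status flag keeps the encoder's signature a plain uint, at the cost of reserving one bit pattern that must never be a valid result.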
@@ -73,6 +73,10 @@ public abstract class StaticLangXCombinedSurfaceView implements ViewModel { public abstract List<PageStreamingDescriptorClassView> pageStreamingDescriptorClasses(); + public boolean getTrue() { + return true; + } + public static Builder newBuilder() { return new AutoValue_StaticLangXCombinedSurfaceView.Builder(); }
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.viewmodel; import com.google.api.codegen.SnippetSetRunner; import com.google.auto.value.AutoValue; import java.util.Collection; import java.util.List; @AutoValue public abstract class StaticLangXCombinedSurfaceView implements ViewModel { @Override public abstract String templateFileName(); @Override public abstract String outputPath(); @Override public String resourceRoot() { return SnippetSetRunner.SNIPPET_RESOURCE_ROOT; } public abstract Integer servicePort(); public abstract List<String> imports(); public abstract Iterable<String> authScopes(); public abstract List<PathTemplateView> pathTemplates(); public abstract List<PathTemplateGetterFunctionView> pathTemplateGetters(); public abstract List<RetryConfigDefinitionView> retryPairDefinitions(); public abstract List<StaticLangApiMethodView> apiMethods(); public abstract String clientTypeName(); public abstract String clientConstructorName(); public abstract List<GrpcStubView> stubs(); public abstract String callOptionsTypeName(); public abstract String defaultClientOptionFunctionName(); public abstract String defaultCallOptionFunctionName(); public abstract String servicePhraseName(); public abstract String serviceOriginalName(); public abstract List<ApiCallSettingsView> callSettings(); public abstract List<String> serviceDoc(); public abstract String localPackageName(); public abstract String serviceAddress(); public abstract List<PageStreamingDescriptorClassView> pageStreamingDescriptorClasses(); public static Builder newBuilder() { return new AutoValue_StaticLangXCombinedSurfaceView.Builder(); } @AutoValue.Builder public static abstract class Builder { public abstract Builder apiMethods(List<StaticLangApiMethodView> val); public abstract Builder imports(List<String> val); public abstract Builder authScopes(Iterable<String> val); public abstract Builder clientTypeName(String val); public abstract Builder clientConstructorName(String val); public abstract Builder stubs(List<GrpcStubView> val); public abstract Builder callOptionsTypeName(String val); public abstract Builder defaultClientOptionFunctionName(String val); public abstract Builder defaultCallOptionFunctionName(String val); public abstract Builder servicePhraseName(String val); public abstract Builder serviceOriginalName(String val); public abstract Builder outputPath(String val); public abstract Builder localPackageName(String val); public abstract Builder callSettings(List<ApiCallSettingsView> callSettings); public abstract Builder pathTemplates(List<PathTemplateView> val); public abstract Builder retryPairDefinitions(List<RetryConfigDefinitionView> val); public abstract Builder serviceDoc(List<String> val); public abstract Builder serviceAddress(String val); public abstract Builder pathTemplateGetters(List<PathTemplateGetterFunctionView> val); public abstract Builder servicePort(Integer val); public abstract Builder templateFileName(String val); public abstract Builder 
pageStreamingDescriptorClasses( List<PageStreamingDescriptorClassView> val); public abstract StaticLangXCombinedSurfaceView build(); } }
1
18,250
this looks very weird
googleapis-gapic-generator
java
@@ -14,7 +14,9 @@ import java.util.logging.LogRecord; * Log to the console using a basic formatter. * * @author Wouter Zelle + * @deprecated This class will be completely removed in 7.0.0 */ +@Deprecated public class ConsoleLogHandler extends Handler { private static final Formatter FORMATTER = new PmdLogFormatter();
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.util.log; import java.io.PrintWriter; import java.io.StringWriter; import java.util.logging.Formatter; import java.util.logging.Handler; import java.util.logging.LogRecord; /** * Log to the console using a basic formatter. * * @author Wouter Zelle */ public class ConsoleLogHandler extends Handler { private static final Formatter FORMATTER = new PmdLogFormatter(); @Override public void publish(LogRecord logRecord) { System.out.println(FORMATTER.format(logRecord)); if (logRecord.getThrown() != null) { // Use the same channel, to make sure that the stacktrace comes // after the message on the console (using printStackTrace // directly messes things up) StringWriter stringWriter = new StringWriter(); PrintWriter printWriter = new PrintWriter(stringWriter, true); logRecord.getThrown().printStackTrace(printWriter); System.out.println(stringWriter.toString()); } } @Override public void close() throws SecurityException { // nothing to do } @Override public void flush() { System.out.flush(); } }
1
13,964
So, the culprit was actually this class "ConsoleLogHandler", correct? Because it simply wrote to stdout...
pmd-pmd
java
@@ -272,7 +272,7 @@ namespace NLog.Config public override LoggingConfiguration Reload() { if (!string.IsNullOrEmpty(_originalFileName)) - return new XmlLoggingConfiguration(_originalFileName, LogFactory); + return LogFactory.CreateConfig(_originalFileName); //TODO NLog 5: should call LoadConfiguration which sets LogFactory.Configuration else return base.Reload(); }
1
// // Copyright (c) 2004-2020 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Config { using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Xml; using NLog.Common; using NLog.Internal; using NLog.Layouts; using JetBrains.Annotations; #if SILVERLIGHT // ReSharper disable once RedundantUsingDirective using System.Windows; #endif /// <summary> /// A class for configuring NLog through an XML configuration file /// (App.config style or App.nlog style). /// /// Parsing of the XML file is also implemented in this class. /// </summary> ///<remarks> /// - This class is thread-safe.<c>.ToList()</c> is used for that purpose. /// - Update TemplateXSD.xml for changes outside targets /// </remarks> public class XmlLoggingConfiguration : LoggingConfigurationParser { #if __ANDROID__ /// <summary> /// Prefix for assets in Xamarin Android /// </summary> private const string AssetsPrefix = "assets/"; #endif private readonly Dictionary<string, bool> _fileMustAutoReloadLookup = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase); private string _originalFileName; private readonly Stack<string> _currentFilePath = new Stack<string>(); /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="fileName">Configuration file to be read.</param> public XmlLoggingConfiguration([NotNull] string fileName) : this(fileName, LogManager.LogFactory) { } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. 
/// </summary> /// <param name="fileName">Configuration file to be read.</param> /// <param name="logFactory">The <see cref="LogFactory" /> to which to apply any applicable configuration values.</param> public XmlLoggingConfiguration([NotNull] string fileName, LogFactory logFactory) : base(logFactory) { using (XmlReader reader = CreateFileReader(fileName)) { Initialize(reader, fileName); } } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="fileName">Configuration file to be read.</param> /// <param name="ignoreErrors">Ignore any errors during configuration.</param> [Obsolete("Constructor with parameter ignoreErrors has limited effect. Instead use LogManager.ThrowConfigExceptions. Marked obsolete in NLog 4.7")] public XmlLoggingConfiguration([NotNull] string fileName, bool ignoreErrors) : this(fileName, ignoreErrors, LogManager.LogFactory) { } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="fileName">Configuration file to be read.</param> /// <param name="ignoreErrors">Ignore any errors during configuration.</param> /// <param name="logFactory">The <see cref="LogFactory" /> to which to apply any applicable configuration values.</param> [Obsolete("Constructor with parameter ignoreErrors has limited effect. Instead use LogManager.ThrowConfigExceptions. Marked obsolete in NLog 4.7")] public XmlLoggingConfiguration([NotNull] string fileName, bool ignoreErrors, LogFactory logFactory) : base(logFactory) { using (XmlReader reader = CreateFileReader(fileName)) { Initialize(reader, fileName, ignoreErrors); } } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="reader">XML reader to read from.</param> public XmlLoggingConfiguration([NotNull] XmlReader reader) : this(reader, null) { } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="reader"><see cref="XmlReader"/> containing the configuration section.</param> /// <param name="fileName">Name of the file that contains the element (to be used as a base for including other files). <c>null</c> is allowed.</param> public XmlLoggingConfiguration([NotNull] XmlReader reader, [CanBeNull] string fileName) : this(reader, fileName, LogManager.LogFactory) { } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="reader"><see cref="XmlReader"/> containing the configuration section.</param> /// <param name="fileName">Name of the file that contains the element (to be used as a base for including other files). <c>null</c> is allowed.</param> /// <param name="logFactory">The <see cref="LogFactory" /> to which to apply any applicable configuration values.</param> public XmlLoggingConfiguration([NotNull] XmlReader reader, [CanBeNull] string fileName, LogFactory logFactory) : base(logFactory) { Initialize(reader, fileName); } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="reader"><see cref="XmlReader"/> containing the configuration section.</param> /// <param name="fileName">Name of the file that contains the element (to be used as a base for including other files). 
<c>null</c> is allowed.</param> /// <param name="ignoreErrors">Ignore any errors during configuration.</param> [Obsolete("Constructor with parameter ignoreErrors has limited effect. Instead use LogManager.ThrowConfigExceptions. Marked obsolete in NLog 4.7")] public XmlLoggingConfiguration([NotNull] XmlReader reader, [CanBeNull] string fileName, bool ignoreErrors) : this(reader, fileName, ignoreErrors, LogManager.LogFactory) { } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="reader"><see cref="XmlReader"/> containing the configuration section.</param> /// <param name="fileName">Name of the file that contains the element (to be used as a base for including other files). <c>null</c> is allowed.</param> /// <param name="ignoreErrors">Ignore any errors during configuration.</param> /// <param name="logFactory">The <see cref="LogFactory" /> to which to apply any applicable configuration values.</param> [Obsolete("Constructor with parameter ignoreErrors has limited effect. Instead use LogManager.ThrowConfigExceptions. Marked obsolete in NLog 4.7")] public XmlLoggingConfiguration([NotNull] XmlReader reader, [CanBeNull] string fileName, bool ignoreErrors, LogFactory logFactory) : base(logFactory) { Initialize(reader, fileName, ignoreErrors); } /// <summary> /// Initializes a new instance of the <see cref="XmlLoggingConfiguration" /> class. /// </summary> /// <param name="xmlContents">The XML contents.</param> /// <param name="fileName">Name of the XML file.</param> /// <param name="logFactory">The <see cref="LogFactory" /> to which to apply any applicable configuration values.</param> internal XmlLoggingConfiguration([NotNull] string xmlContents, [CanBeNull] string fileName, LogFactory logFactory) : base(logFactory) { using (var stringReader = new StringReader(xmlContents)) { using (XmlReader reader = XmlReader.Create(stringReader)) { Initialize(reader, fileName); } } } /// <summary> /// Parse XML string as NLog configuration /// </summary> /// <param name="xml">NLog configuration in XML to be parsed</param> public static XmlLoggingConfiguration CreateFromXmlString(string xml) { return CreateFromXmlString(xml, LogManager.LogFactory); } /// <summary> /// Parse XML string as NLog configuration /// </summary> /// <param name="xml">NLog configuration in XML to be parsed</param> /// <param name="logFactory">NLog LogFactory</param> public static XmlLoggingConfiguration CreateFromXmlString(string xml, LogFactory logFactory) { return new XmlLoggingConfiguration(xml, string.Empty, logFactory); } #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ && !NETSTANDARD /// <summary> /// Gets the default <see cref="LoggingConfiguration" /> object by parsing /// the application configuration file (<c>app.exe.config</c>). /// </summary> public static LoggingConfiguration AppConfig { get { object o = System.Configuration.ConfigurationManager.GetSection("nlog"); return o as LoggingConfiguration; } } #endif /// <summary> /// Did the <see cref="Initialize"/> succeed? <c>true</c>= success, <c>false</c>= error, <c>null</c> = initialize not started yet. /// </summary> public bool? InitializeSucceeded { get; private set; } /// <summary> /// Gets or sets a value indicating whether all of the configuration files /// should be watched for changes and reloaded automatically when changed. 
/// </summary> public bool AutoReload { get { if (_fileMustAutoReloadLookup.Count == 0) return false; else return _fileMustAutoReloadLookup.Values.All(mustAutoReload => mustAutoReload); } set { var autoReloadFiles = _fileMustAutoReloadLookup.Keys.ToList(); foreach (string nextFile in autoReloadFiles) _fileMustAutoReloadLookup[nextFile] = value; } } /// <summary> /// Gets the collection of file names which should be watched for changes by NLog. /// This is the list of configuration files processed. /// If the <c>autoReload</c> attribute is not set it returns empty collection. /// </summary> public override IEnumerable<string> FileNamesToWatch { get { return _fileMustAutoReloadLookup.Where(entry => entry.Value).Select(entry => entry.Key); } } /// <summary> /// Re-reads the original configuration file and returns the new <see cref="LoggingConfiguration" /> object. /// </summary> /// <returns>The new <see cref="XmlLoggingConfiguration" /> object.</returns> public override LoggingConfiguration Reload() { if (!string.IsNullOrEmpty(_originalFileName)) return new XmlLoggingConfiguration(_originalFileName, LogFactory); else return base.Reload(); } /// <summary> /// Get file paths (including filename) for the possible NLog config files. /// </summary> /// <returns>The file paths to the possible config file</returns> public static IEnumerable<string> GetCandidateConfigFilePaths() { return LogManager.LogFactory.GetCandidateConfigFilePaths(); } /// <summary> /// Overwrite the paths (including filename) for the possible NLog config files. /// </summary> /// <param name="filePaths">The file paths to the possible config file</param> public static void SetCandidateConfigFilePaths(IEnumerable<string> filePaths) { LogManager.LogFactory.SetCandidateConfigFilePaths(filePaths); } /// <summary> /// Clear the candidate file paths and return to the defaults. /// </summary> public static void ResetCandidateConfigFilePath() { LogManager.LogFactory.ResetCandidateConfigFilePath(); } /// <summary> /// Create XML reader for (xml config) file. /// </summary> /// <param name="fileName">filepath</param> /// <returns>reader or <c>null</c> if filename is empty.</returns> private XmlReader CreateFileReader(string fileName) { if (!string.IsNullOrEmpty(fileName)) { fileName = fileName.Trim(); #if __ANDROID__ //support loading config from special assets folder in nlog.config if (fileName.StartsWith(AssetsPrefix, StringComparison.OrdinalIgnoreCase)) { //remove prefix fileName = fileName.Substring(AssetsPrefix.Length); Stream stream = Android.App.Application.Context.Assets.Open(fileName); return XmlReader.Create(stream); } #endif return LogFactory.CurrentAppEnvironment.LoadXmlFile(fileName); } return null; } /// <summary> /// Initializes the configuration. /// </summary> /// <param name="reader"><see cref="XmlReader"/> containing the configuration section.</param> /// <param name="fileName">Name of the file that contains the element (to be used as a base for including other files). 
<c>null</c> is allowed.</param> /// <param name="ignoreErrors">Ignore any errors during configuration.</param> private void Initialize([NotNull] XmlReader reader, [CanBeNull] string fileName, bool ignoreErrors = false) { try { InitializeSucceeded = null; _originalFileName = fileName; reader.MoveToContent(); var content = new NLogXmlElement(reader); if (!string.IsNullOrEmpty(fileName)) { InternalLogger.Info("Configuring from an XML element in {0}...", fileName); ParseTopLevel(content, fileName, autoReloadDefault: false); } else { ParseTopLevel(content, null, autoReloadDefault: false); } InitializeSucceeded = true; CheckParsingErrors(content); } catch (Exception exception) { InitializeSucceeded = false; if (exception.MustBeRethrownImmediately()) { throw; } var configurationException = new NLogConfigurationException(exception, "Exception when parsing {0}. ", fileName); InternalLogger.Error(exception, configurationException.Message); if (!ignoreErrors && (LogFactory.ThrowConfigExceptions ?? LogFactory.ThrowExceptions || configurationException.MustBeRethrown())) throw configurationException; } } /// <summary> /// Checks whether any error during XML configuration parsing has occurred. /// If there are any and <c>ThrowConfigExceptions</c> or <c>ThrowExceptions</c> /// setting is enabled - throws <c>NLogConfigurationException</c>, otherwise /// just writes an internal log at Warn level. /// </summary> /// <param name="rootContentElement">Root NLog configuration xml element</param> private void CheckParsingErrors(NLogXmlElement rootContentElement) { var parsingErrors = rootContentElement.GetParsingErrors().ToArray(); if (parsingErrors.Any()) { if (LogManager.ThrowConfigExceptions ?? LogManager.ThrowExceptions) { string exceptionMessage = string.Join(Environment.NewLine, parsingErrors); throw new NLogConfigurationException(exceptionMessage); } else { foreach (var parsingError in parsingErrors) { InternalLogger.Log(LogLevel.Warn, parsingError); } } } } /// <summary> /// Add a file with configuration. Check if not already included. /// </summary> /// <param name="fileName"></param> /// <param name="autoReloadDefault"></param> private void ConfigureFromFile([NotNull] string fileName, bool autoReloadDefault) { if (!_fileMustAutoReloadLookup.ContainsKey(GetFileLookupKey(fileName))) { using (var reader = LogFactory.CurrentAppEnvironment.LoadXmlFile(fileName)) { reader.MoveToContent(); ParseTopLevel(new NLogXmlElement(reader, true), fileName, autoReloadDefault); } } } /// <summary> /// Parse the root /// </summary> /// <param name="content"></param> /// <param name="filePath">path to config file.</param> /// <param name="autoReloadDefault">The default value for the autoReload option.</param> private void ParseTopLevel(NLogXmlElement content, [CanBeNull] string filePath, bool autoReloadDefault) { content.AssertName("nlog", "configuration"); switch (content.LocalName.ToUpperInvariant()) { case "CONFIGURATION": ParseConfigurationElement(content, filePath, autoReloadDefault); break; case "NLOG": ParseNLogElement(content, filePath, autoReloadDefault); break; } } /// <summary> /// Parse {configuration} xml element. 
/// </summary> /// <param name="configurationElement"></param> /// <param name="filePath">path to config file.</param> /// <param name="autoReloadDefault">The default value for the autoReload option.</param> private void ParseConfigurationElement(NLogXmlElement configurationElement, [CanBeNull] string filePath, bool autoReloadDefault) { InternalLogger.Trace("ParseConfigurationElement"); configurationElement.AssertName("configuration"); var nlogElements = configurationElement.Elements("nlog").ToList(); foreach (var nlogElement in nlogElements) { ParseNLogElement(nlogElement, filePath, autoReloadDefault); } } /// <summary> /// Parse {NLog} xml element. /// </summary> /// <param name="nlogElement"></param> /// <param name="filePath">path to config file.</param> /// <param name="autoReloadDefault">The default value for the autoReload option.</param> private void ParseNLogElement(ILoggingConfigurationElement nlogElement, [CanBeNull] string filePath, bool autoReloadDefault) { InternalLogger.Trace("ParseNLogElement"); nlogElement.AssertName("nlog"); bool autoReload = nlogElement.GetOptionalBooleanValue("autoReload", autoReloadDefault); if (!string.IsNullOrEmpty(filePath)) _fileMustAutoReloadLookup[GetFileLookupKey(filePath)] = autoReload; try { _currentFilePath.Push(filePath); base.LoadConfig(nlogElement, Path.GetDirectoryName(filePath)); } finally { _currentFilePath.Pop(); } } /// <summary> /// Parses a single config section within the NLog-config /// </summary> /// <param name="configSection"></param> /// <returns>Section was recognized</returns> protected override bool ParseNLogSection(ILoggingConfigurationElement configSection) { if (configSection.MatchesName("include")) { string filePath = _currentFilePath.Peek(); bool autoLoad = !string.IsNullOrEmpty(filePath) && _fileMustAutoReloadLookup[GetFileLookupKey(filePath)]; ParseIncludeElement(configSection, !string.IsNullOrEmpty(filePath) ? Path.GetDirectoryName(filePath) : null, autoLoad); return true; } else { return base.ParseNLogSection(configSection); } } private void ParseIncludeElement(ILoggingConfigurationElement includeElement, string baseDirectory, bool autoReloadDefault) { includeElement.AssertName("include"); string newFileName = includeElement.GetRequiredValue("file", "nlog"); var ignoreErrors = includeElement.GetOptionalBooleanValue("ignoreErrors", false); try { newFileName = ExpandSimpleVariables(newFileName); newFileName = SimpleLayout.Evaluate(newFileName); var fullNewFileName = newFileName; if (baseDirectory != null) { fullNewFileName = Path.Combine(baseDirectory, newFileName); } #if SILVERLIGHT && !WINDOWS_PHONE newFileName = newFileName.Replace("\\", "/"); if (Application.GetResourceStream(new Uri(fullNewFileName, UriKind.Relative)) != null) #else if (File.Exists(fullNewFileName)) #endif { InternalLogger.Debug("Including file '{0}'", fullNewFileName); ConfigureFromFile(fullNewFileName, autoReloadDefault); } else { //is mask? 
if (newFileName.Contains("*")) { ConfigureFromFilesByMask(baseDirectory, newFileName, autoReloadDefault); } else { if (ignoreErrors) { //quick stop for performances InternalLogger.Debug("Skipping included file '{0}' as it can't be found", fullNewFileName); return; } throw new FileNotFoundException("Included file not found: " + fullNewFileName); } } } catch (Exception exception) { if (exception.MustBeRethrownImmediately()) { throw; } var configurationException = new NLogConfigurationException(exception, "Error when including '{0}'.", newFileName); InternalLogger.Error(exception, configurationException.Message); if (!ignoreErrors) throw configurationException; } } /// <summary> /// Include (multiple) files by filemask, e.g. *.nlog /// </summary> /// <param name="baseDirectory">base directory in case if <paramref name="fileMask"/> is relative</param> /// <param name="fileMask">relative or absolute fileMask</param> /// <param name="autoReloadDefault"></param> private void ConfigureFromFilesByMask(string baseDirectory, string fileMask, bool autoReloadDefault) { var directory = baseDirectory; //if absolute, split to file mask and directory. if (Path.IsPathRooted(fileMask)) { directory = Path.GetDirectoryName(fileMask); if (directory == null) { InternalLogger.Warn("directory is empty for include of '{0}'", fileMask); return; } var filename = Path.GetFileName(fileMask); if (filename == null) { InternalLogger.Warn("filename is empty for include of '{0}'", fileMask); return; } fileMask = filename; } #if SILVERLIGHT && !WINDOWS_PHONE var files = Directory.EnumerateFiles(directory, fileMask); #else var files = Directory.GetFiles(directory, fileMask); #endif foreach (var file in files) { //note we exclude our self in ConfigureFromFile ConfigureFromFile(file, autoReloadDefault); } } private static string GetFileLookupKey([NotNull] string fileName) { #if SILVERLIGHT && !WINDOWS_PHONE // file names are relative to XAP return fileName; #else return Path.GetFullPath(fileName); #endif } /// <inheritdoc /> public override string ToString() { return $"{base.ToString()}, FilePath={_originalFileName}"; } } }
1
21,314
I'm still curious why you need to modify this method. Why not, in the future, just have a method called `LogFactory.ReloadConfiguration()` instead of the config assigning itself?
NLog-NLog
.cs
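The reviewer's suggestion above is a common inversion of ownership: let the factory own reloading instead of having a configuration object re-assign itself. A minimal Go sketch of that shape (hypothetical names modeled on the suggested `LogFactory.ReloadConfiguration()`, not NLog's actual API):

package main

import "fmt"

// Config is a hypothetical loaded configuration snapshot.
type Config struct {
	Source string
}

// LogFactory owns the current configuration and is the only
// component allowed to swap it, mirroring the reviewer's
// suggested LogFactory.ReloadConfiguration() shape.
type LogFactory struct {
	current *Config
	load    func() (*Config, error) // injected loader, e.g. re-reads the config file
}

// ReloadConfiguration re-runs the loader and replaces the active
// configuration; the Config type never assigns itself anywhere.
func (f *LogFactory) ReloadConfiguration() error {
	cfg, err := f.load()
	if err != nil {
		return fmt.Errorf("reload configuration: %w", err)
	}
	f.current = cfg
	return nil
}

func main() {
	f := &LogFactory{load: func() (*Config, error) {
		return &Config{Source: "NLog.config"}, nil
	}}
	if err := f.ReloadConfiguration(); err != nil {
		fmt.Println("reload failed:", err)
		return
	}
	fmt.Println("active config from:", f.current.Source)
}

The design benefit is that reload failures surface at one call site and the old configuration stays active until a new one loads successfully.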
@@ -110,8 +110,8 @@ class Test(base.Base): 'Molecule run (always).')) def test(ctx, scenario_name, driver_name, __all, destroy): # pragma: no cover """ - Test (lint, destroy, dependency, syntax, create, prepare, converge, - idempotence, side_effect, verify, destroy). + Test (lint, cleanup, destroy, dependency, syntax, create, prepare, + converge, idempotence, side_effect, verify, cleanup, destroy). """ args = ctx.obj.get('args')
1
# Copyright (c) 2015-2018 Cisco Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import click from molecule import config from molecule import logger from molecule import scenarios from molecule import util from molecule.command import base LOG = logger.get_logger(__name__) class Test(base.Base): """ .. program:: molecule test .. option:: molecule test Target the default scenario. .. program:: molecule test --scenario-name foo .. option:: molecule test --scenario-name foo Targeting a specific scenario. .. program:: molecule test --all .. option:: molecule test --all Target all scenarios. .. program:: molecule test --destroy=always .. option:: molecule test --destroy=always Always destroy instances at the conclusion of a Molecule run. .. program:: molecule --debug test .. option:: molecule --debug test Executing with `debug`. .. program:: molecule --base-config base.yml test .. option:: molecule --base-config base.yml test Executing with a `base-config`. .. program:: molecule --env-file foo.yml test .. option:: molecule --env-file foo.yml test Load an env file to read variables from when rendering molecule.yml. """ def execute(self): """ Execute the actions necessary to perform a `molecule test` and returns None. :return: None """ @click.command() @click.pass_context @click.option( '--scenario-name', '-s', default=base.MOLECULE_DEFAULT_SCENARIO_NAME, help='Name of the scenario to target. ({})'.format( base.MOLECULE_DEFAULT_SCENARIO_NAME)) @click.option( '--driver-name', '-d', type=click.Choice(config.molecule_drivers()), help='Name of driver to use. (docker)') @click.option( '--all/--no-all', '__all', default=False, help='Test all scenarios. Default is False.') @click.option( '--destroy', type=click.Choice(['always', 'never']), default='always', help=('The destroy strategy used at the conclusion of a ' 'Molecule run (always).')) def test(ctx, scenario_name, driver_name, __all, destroy): # pragma: no cover """ Test (lint, destroy, dependency, syntax, create, prepare, converge, idempotence, side_effect, verify, destroy). 
""" args = ctx.obj.get('args') subcommand = base._get_subcommand(__name__) command_args = { 'destroy': destroy, 'subcommand': subcommand, 'driver_name': driver_name, } if __all: scenario_name = None s = scenarios.Scenarios( base.get_configs(args, command_args), scenario_name) s.print_matrix() for scenario in s: try: for action in scenario.sequence: scenario.config.action = action base.execute_subcommand(scenario.config, action) except SystemExit: if destroy == 'always': msg = ('An error occurred during the test sequence ' "action: '{}'. Cleaning up.").format(action) LOG.warn(msg) base.execute_subcommand(scenario.config, 'destroy') util.sysexit() raise
1
9,178
Should this say "lint, cleanup, destroy, dependency, syntax, create, prepare, converge, idempotence, side_effect, verify, cleanup, destroy"?
ansible-community-molecule
py
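The drift this patch fixes, a docstring listing a stale action sequence, is easy to prevent by deriving the help text from the sequence itself. A minimal Go sketch of that idea (the action list is assumed here; molecule's real sequence lives in its scenario configuration):

package main

import (
	"fmt"
	"strings"
)

// testSequence is the single source of truth for the actions a
// full test run performs (assumed list for illustration).
var testSequence = []string{
	"lint", "cleanup", "destroy", "dependency", "syntax", "create",
	"prepare", "converge", "idempotence", "side_effect", "verify",
	"cleanup", "destroy",
}

// helpText renders the docstring from the sequence, so the two
// can never disagree the way the original docstring did.
func helpText() string {
	return fmt.Sprintf("Test (%s).", strings.Join(testSequence, ", "))
}

func main() {
	fmt.Println(helpText())
}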
@@ -23,12 +23,11 @@ func sendHeaders(ctx context.Context, headers p2p.Headers, stream *stream) error defer cancel() if err := w.WriteMsgWithContext(ctx, headersP2PToPB(headers)); err != nil { - return fmt.Errorf("write message: %w", err) + return fmt.Errorf("send write message: %w", err) } - h := new(pb.Headers) if err := r.ReadMsgWithContext(ctx, h); err != nil { - return fmt.Errorf("read message: %w", err) + return fmt.Errorf("send read message: %w", err) } stream.headers = headersPBToP2P(h)
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package libp2p import ( "context" "fmt" "time" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/headers/pb" "github.com/ethersphere/bee/pkg/p2p/protobuf" ) var sendHeadersTimeout = 10 * time.Second func sendHeaders(ctx context.Context, headers p2p.Headers, stream *stream) error { w, r := protobuf.NewWriterAndReader(stream) ctx, cancel := context.WithTimeout(ctx, sendHeadersTimeout) defer cancel() if err := w.WriteMsgWithContext(ctx, headersP2PToPB(headers)); err != nil { return fmt.Errorf("write message: %w", err) } h := new(pb.Headers) if err := r.ReadMsgWithContext(ctx, h); err != nil { return fmt.Errorf("read message: %w", err) } stream.headers = headersPBToP2P(h) return nil } func handleHeaders(headler p2p.HeadlerFunc, stream *stream) error { w, r := protobuf.NewWriterAndReader(stream) ctx, cancel := context.WithTimeout(context.Background(), sendHeadersTimeout) defer cancel() headers := new(pb.Headers) if err := r.ReadMsgWithContext(ctx, headers); err != nil { return fmt.Errorf("read message: %w", err) } stream.headers = headersPBToP2P(headers) var h p2p.Headers if headler != nil { h = headler(stream.headers) } if err := w.WriteMsgWithContext(ctx, headersP2PToPB(h)); err != nil { return fmt.Errorf("write message: %w", err) } return nil } func headersPBToP2P(h *pb.Headers) p2p.Headers { p2ph := make(p2p.Headers) for _, rh := range h.Headers { p2ph[rh.Key] = rh.Value } return p2ph } func headersP2PToPB(h p2p.Headers) *pb.Headers { pbh := new(pb.Headers) pbh.Headers = make([]*pb.Header, 0) for key, value := range h { pbh.Headers = append(pbh.Headers, &pb.Header{ Key: key, Value: value, }) } return pbh }
1
13,385
I am not sure why these log messages were changed.
ethersphere-bee
go
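For context on the patch above: it prefixes the wrap messages with "send" so that a failure in sendHeaders is distinguishable from the otherwise identical read/write wraps in handleHeaders. A small Go sketch of why the prefix matters and why %w keeps the error chain inspectable:

package main

import (
	"errors"
	"fmt"
	"io"
)

// readMsg stands in for a stream read that fails; here it always
// returns a sentinel error so we can inspect the wrapped chain.
func readMsg() error { return io.ErrUnexpectedEOF }

func sendHeaders() error {
	if err := readMsg(); err != nil {
		// The "send" prefix is the point of the patch: both the
		// sending and handling paths wrap the same underlying read
		// error, and without a prefix the two sites read identically
		// in logs.
		return fmt.Errorf("send read message: %w", err)
	}
	return nil
}

func main() {
	err := sendHeaders()
	fmt.Println(err)                                  // send read message: unexpected EOF
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true: %w keeps the chain intact
}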
@@ -5,7 +5,10 @@ package runtime // Interfaces are represented as a pair of {typecode, value}, where value can be // anything (including non-pointers). -import "unsafe" +import ( + "reflect" + "unsafe" +) type _interface struct { typecode uintptr
1
package runtime // This file implements Go interfaces. // // Interfaces are represented as a pair of {typecode, value}, where value can be // anything (including non-pointers). import "unsafe" type _interface struct { typecode uintptr value unsafe.Pointer } //go:inline func composeInterface(typecode uintptr, value unsafe.Pointer) _interface { return _interface{typecode, value} } //go:inline func decomposeInterface(i _interface) (uintptr, unsafe.Pointer) { return i.typecode, i.value } // Return true iff both interfaces are equal. func interfaceEqual(x, y _interface) bool { if x.typecode != y.typecode { // Different dynamic type so always unequal. return false } if x.typecode == 0 { // Both interfaces are nil, so they are equal. return true } // TODO: depends on reflection. panic("unimplemented: interface equality") } // interfaceTypeAssert is called when a type assert without comma-ok still // returns false. func interfaceTypeAssert(ok bool) { if !ok { runtimePanic("type assert failed") } } // The following declarations are only used during IR construction. They are // lowered to inline IR in the interface lowering pass. // See compiler/interface-lowering.go for details. type interfaceMethodInfo struct { signature *uint8 // external *i8 with a name identifying the Go function signature funcptr uintptr // bitcast from the actual function pointer } type typecodeID struct { // Depending on the type kind of this typecodeID, this pointer is something // different: // * basic types: null // * named type: the underlying type // * interface: null // * chan/pointer/slice/array: the element type // * struct: bitcast of global with structField array // * func/map: TODO references *typecodeID // The array length, for array types. length uintptr } // structField is used by the compiler to pass information to the interface // lowering pass. It is not used in the final binary. type structField struct { typecode *typecodeID // type of this struct field name *uint8 // pointer to char array tag *uint8 // pointer to char array, or nil embedded bool } // Pseudo type used before interface lowering. By using a struct instead of a // function call, this is simpler to reason about during init interpretation // than a function call. Also, by keeping the method set around it is easier to // implement interfaceImplements in the interp package. type typeInInterface struct { typecode *typecodeID methodSet *interfaceMethodInfo // nil or a GEP of an array } // Pseudo function call used during a type assert. It is used during interface // lowering, to assign the lowest type numbers to the types with the most type // asserts. Also, it is replaced with const false if this type assert can never // happen. func typeAssert(actualType uintptr, assertedType *typecodeID) bool // Pseudo function call that returns whether a given type implements all methods // of the given interface. func interfaceImplements(typecode uintptr, interfaceMethodSet **uint8) bool // Pseudo function that returns a function pointer to the method to call. // See the interface lowering pass for how this is lowered to a real call. func interfaceMethod(typecode uintptr, interfaceMethodSet **uint8, signature *uint8) uintptr
1
7,617
This panic message is incorrect: this is a full implementation of a slice equality check. Should it be something more like `type x is not comparable` instead?
tinygo-org-tinygo
go
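For context on the reviewer's suggested wording: mainstream (gc) Go also panics at runtime when an interface's dynamic type is not comparable, and its message names the type rather than saying "unimplemented". A small sketch that triggers that panic (run with the standard toolchain, not TinyGo):

package main

import "fmt"

func main() {
	// Slices are not comparable, so comparing interfaces whose
	// dynamic type is []int panics at runtime with
	// "comparing uncomparable type []int" in gc Go, which is the
	// kind of wording the reviewer is suggesting here.
	var x interface{} = []int{1, 2, 3}
	var y interface{} = []int{1, 2, 3}
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	fmt.Println(x == y) // never reached
}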
@@ -271,7 +271,7 @@ add_extra_option(opt_info_t *opt_info, const TCHAR *opt) return DR_FAILURE; } - len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(opt)); + len = _dr_nlen(opt, DR_MAX_OPTIONS_LENGTH); opt_info->extra_opts[idx] = malloc((len + 1) * sizeof(opt_info->extra_opts[idx][0])); _tcsncpy(opt_info->extra_opts[idx], opt, len);
1
/* ********************************************************** * Copyright (c) 2011-2016 Google, Inc. All rights reserved. * Copyright (c) 2008-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include <stdlib.h> /* for malloc */ #include <stdio.h> #include <string.h> #include "utils.h" #include "share.h" #include "utils.h" #include "dr_config.h" #include "our_tchar.h" #include "dr_frontend.h" #ifdef WINDOWS # include <windows.h> # include <io.h> /* for _get_osfhandle */ # include "processes.h" # include "mfapi.h" /* for PLATFORM_WIN_2000 */ # define RELEASE32_DLL _TEXT("\\lib32\\release\\dynamorio.dll") # define DEBUG32_DLL _TEXT("\\lib32\\debug\\dynamorio.dll") # define RELEASE64_DLL _TEXT("\\lib64\\release\\dynamorio.dll") # define DEBUG64_DLL _TEXT("\\lib64\\debug\\dynamorio.dll") # define LOG_SUBDIR _TEXT("\\logs") # define LIB32_SUBDIR _TEXT("\\lib32") # define PREINJECT32_DLL _TEXT("\\lib32\\drpreinject.dll") # define PREINJECT64_DLL _TEXT("\\lib64\\drpreinject.dll") # define _snprintf d_r_snprintf # undef _sntprintf # define _sntprintf d_r_snprintf_wide int d_r_snprintf(char *s, size_t max, const char *fmt, ...); int d_r_snprintf_wide(wchar_t *s, size_t max, const wchar_t *fmt, ...); #else # include <sys/stat.h> # include <sys/types.h> # include <unistd.h> # if defined(MACOS) || defined(ANDROID) # include <sys/syscall.h> # else # include <syscall.h> # endif # define RELEASE32_DLL "/lib32/release/libdynamorio.so" # define DEBUG32_DLL "/lib32/debug/libdynamorio.so" # define RELEASE64_DLL "/lib64/release/libdynamorio.so" # define DEBUG64_DLL "/lib64/debug/libdynamorio.so" # define LOG_SUBDIR "/logs" # define LIB32_SUBDIR "/lib32/" extern bool create_nudge_signal_payload(siginfo_t *info OUT, uint action_mask, client_id_t client_id, uint64 client_arg); #endif /* The minimum option size is 3, e.g., "-x ". Note that we need the * NULL term too so "-x -y" needs 6 characters. 
*/ #define MAX_NUM_OPTIONS (DR_MAX_OPTIONS_LENGTH / 3) /* Upper bound on the length of an fopen mode string, including the null * terminator. For example, "rw+b\0" or "ra\0". */ enum { MAX_MODE_STRING_SIZE = 16 }; /* Data structs to hold info about the DYNAMORIO_OPTION registry entry */ typedef struct _client_opt_t { TCHAR *path; client_id_t id; TCHAR *opts; } client_opt_t; typedef struct _opt_info_t { dr_operation_mode_t mode; TCHAR *extra_opts[MAX_NUM_OPTIONS]; size_t num_extra_opts; /* note that clients are parsed and stored in priority order */ client_opt_t *client_opts[MAX_CLIENT_LIBS]; size_t num_clients; } opt_info_t; /* Does a straight copy when TCHAR is char, and a widening conversion when * TCHAR is wchar_t. Does not null terminate for use in buffered printing. */ static void convert_to_tchar(TCHAR *dst, const char *src, size_t dst_sz) { #ifdef _UNICODE # ifdef DEBUG int res = # endif MultiByteToWideChar(CP_UTF8, 0 /*=>MB_PRECOMPOSED*/, src, -1 /*null-term*/, dst, (int)dst_sz); DO_ASSERT(res > 0 && "convert_to_tchar failed"); #else strncpy(dst, src, dst_sz); #endif } /* Function to iterate over the options in a DYNAMORIO_OPTIONS string. * For the purposes of this function, we're not differentiating * between an option and an option argument. We're simply looking for * space-separated strings while taking into account that some strings * can be quoted. 'ptr' should point to the current location in the * options string; the option is copied to 'token' */ static TCHAR * get_next_token(TCHAR *ptr, TCHAR *token) { /* advance to next non-space character */ while (*ptr == _T(' ')) { ptr++; } /* check for end-of-string */ if (*ptr == _T('\0')) { token[0] = _T('\0'); return NULL; } /* for quoted options, copy until the closing quote */ if (*ptr == _T('\"') || *ptr == _T('\'') || *ptr == _T('`')) { TCHAR quote = *ptr; *token++ = *ptr++; while (*ptr != _T('\0')) { *token++ = *ptr++; if (ptr[-1] == quote && ptr[-2] != _T('\\')) { break; } } } /* otherwise copy until the next space character */ else { while (*ptr != _T(' ') && *ptr != _T('\0')) { *token++ = *ptr++; } } *token = _T('\0'); return ptr; } /* Allocate a new client_opt_t */ static client_opt_t * new_client_opt(const TCHAR *path, client_id_t id, const TCHAR *opts) { size_t len; client_opt_t *opt = (client_opt_t *)malloc(sizeof(client_opt_t)); if (opt == NULL) { return NULL; } opt->id = id; len = MIN(MAXIMUM_PATH - 1, _tcslen(path)); opt->path = malloc((len + 1) * sizeof(opt->path[0])); _tcsncpy(opt->path, path, len); opt->path[len] = _T('\0'); len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(opts)); opt->opts = malloc((len + 1) * sizeof(opt->opts[0])); _tcsncpy(opt->opts, opts, len); opt->opts[len] = _T('\0'); return opt; } /* Free a client_opt_t */ static void free_client_opt(client_opt_t *opt) { if (opt == NULL) { return; } if (opt->path != NULL) { free(opt->path); } if (opt->opts != NULL) { free(opt->opts); } free(opt); } /* Add another client to an opt_info_t struct */ static dr_config_status_t add_client_lib(opt_info_t *opt_info, client_id_t id, size_t pri, const TCHAR *path, const TCHAR *opts) { size_t i; if (opt_info->num_clients >= MAX_CLIENT_LIBS) { return DR_FAILURE; } if (pri > opt_info->num_clients) { return DR_ID_INVALID; } /* shift existing entries to make space for the new client info */ for (i = opt_info->num_clients; i > pri; i--) { opt_info->client_opts[i] = opt_info->client_opts[i - 1]; } opt_info->client_opts[pri] = new_client_opt(path, id, opts); opt_info->num_clients++; return DR_SUCCESS; } static 
dr_config_status_t remove_client_lib(opt_info_t *opt_info, client_id_t id) { size_t i, j; for (i = 0; i < opt_info->num_clients; i++) { if (opt_info->client_opts[i]->id == id) { free_client_opt(opt_info->client_opts[i]); /* shift remaining entries down */ for (j = i; j < opt_info->num_clients - 1; j++) { opt_info->client_opts[j] = opt_info->client_opts[j + 1]; } opt_info->num_clients--; return DR_SUCCESS; } } return DR_ID_INVALID; } /* Add an 'extra' option (non-client related option) to an opt_info_t struct */ static dr_config_status_t add_extra_option(opt_info_t *opt_info, const TCHAR *opt) { if (opt != NULL && opt[0] != _T('\0')) { size_t idx, len; idx = opt_info->num_extra_opts; if (idx >= MAX_NUM_OPTIONS) { return DR_FAILURE; } len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(opt)); opt_info->extra_opts[idx] = malloc((len + 1) * sizeof(opt_info->extra_opts[idx][0])); _tcsncpy(opt_info->extra_opts[idx], opt, len); opt_info->extra_opts[idx][len] = _T('\0'); opt_info->num_extra_opts++; } return DR_SUCCESS; } static dr_config_status_t add_extra_option_char(opt_info_t *opt_info, const char *opt) { if (opt != NULL && opt[0] != '\0') { TCHAR wbuf[DR_MAX_OPTIONS_LENGTH]; convert_to_tchar(wbuf, opt, DR_MAX_OPTIONS_LENGTH); NULL_TERMINATE_BUFFER(wbuf); return add_extra_option(opt_info, wbuf); } return DR_SUCCESS; } /* Free allocated memory in an opt_info_t */ static void free_opt_info(opt_info_t *opt_info) { size_t i; for (i = 0; i < opt_info->num_clients; i++) { free_client_opt(opt_info->client_opts[i]); } for (i = 0; i < opt_info->num_extra_opts; i++) { free(opt_info->extra_opts[i]); } } /*************************************************************************** * i#85/PR 212034 and i#265/PR 486139: use config files * * The API uses char* here but be careful b/c this file is build w/ UNICODE * so we use *A versions of Windows API routines. * Also note that I left many types as TCHAR for less change in handling * PARAMS_IN_REGISTRY and config files: eventually should convert all * to char. */ #ifdef PARAMS_IN_REGISTRY # define IF_REG_ELSE(x, y) x # define PARAM_STR(name) L_IF_WIN(name) #else # define PARAM_STR(name) name # define IF_REG_ELSE(x, y) y #endif #ifndef PARAMS_IN_REGISTRY /* DYNAMORIO_VAR_CONFIGDIR is searched first, and then these: */ # ifdef WINDOWS # define LOCAL_CONFIG_ENV "USERPROFILE" # define LOCAL_CONFIG_SUBDIR "dynamorio" # else # define LOCAL_CONFIG_ENV "HOME" # define LOCAL_CONFIG_SUBDIR ".dynamorio" # endif # define GLOBAL_CONFIG_SUBDIR "config" # define CFG_SFX_64 "config64" # define CFG_SFX_32 "config32" # ifdef X64 # define CFG_SFX CFG_SFX_64 # else # define CFG_SFX CFG_SFX_32 # endif static const char * get_config_sfx(dr_platform_t dr_platform) { if (dr_platform == DR_PLATFORM_DEFAULT) return CFG_SFX; else if (dr_platform == DR_PLATFORM_32BIT) return CFG_SFX_32; else if (dr_platform == DR_PLATFORM_64BIT) return CFG_SFX_64; else DO_ASSERT(false); return ""; } static bool env_var_exists(const char *name, char *buf, size_t buflen) { return drfront_get_env_var(name, buf, buflen) == DRFRONT_SUCCESS; } static bool is_config_dir_valid(const char *dir) { /* i#1701 Android support: on Android devices (and in some cases ChromeOS), * $HOME is read-only. Thus we want to check for writability. */ bool ret = false; return drfront_access(dir, DRFRONT_WRITE, &ret) == DRFRONT_SUCCESS && ret; } /* If find_temp, will use a temp dir; else will fail if no standard config dir. 
*/ static bool get_config_dir(bool global, char *fname, size_t fname_len, bool find_temp) { char dir[MAXIMUM_PATH]; const char *subdir = ""; bool res = false; /* We return the last-tried dir on failure */ NULL_TERMINATE_BUFFER(dir); fname[0] = '\0'; if (global) { # ifdef WINDOWS _snprintf(dir, BUFFER_SIZE_ELEMENTS(dir), TSTR_FMT, get_dynamorio_home()); NULL_TERMINATE_BUFFER(dir); subdir = GLOBAL_CONFIG_SUBDIR; # else /* FIXME i#840: Support global config files by porting more of utils.c. */ return false; # endif } else { /* DYNAMORIO_CONFIGDIR takes precedence, and we do not check for * is_config_dir_valid() b/c the user explicitly asked for it. * The user can set TMPDIR if checks are desired. */ if (!env_var_exists(DYNAMORIO_VAR_CONFIGDIR, dir, BUFFER_SIZE_ELEMENTS(dir))) { if (!env_var_exists(LOCAL_CONFIG_ENV, dir, BUFFER_SIZE_ELEMENTS(dir)) || !is_config_dir_valid(dir)) { if (!find_temp) goto get_config_dir_done; /* Attempt to make things work for non-interactive users (i#939) */ if ((!env_var_exists("TMP", dir, BUFFER_SIZE_ELEMENTS(dir)) || !is_config_dir_valid(dir)) && (!env_var_exists("TEMP", dir, BUFFER_SIZE_ELEMENTS(dir)) || !is_config_dir_valid(dir)) && (!env_var_exists("TMPDIR", dir, BUFFER_SIZE_ELEMENTS(dir)) || !is_config_dir_valid(dir))) { # ifdef WINDOWS /* There is no straightforward hardcoded fallback for temp dirs * on Windows. But for that reason even a sandbox will leave * TMP and/or TEMP set so we don't expect to hit this case. */ goto get_config_dir_done; # else # ifdef ANDROID /* This dir is not always present, but often is. * We can't easily query the Java layer for the "cache dir". * DrMi#1857: for Android apps, this is disallowed by SELinux * (and we found no way to chcon to fix that), which does allow * /sdcard but it's not world-writable. We have to rely on the * user setting TMPDIR to the app's data dir. */ # define TMP_DIR "/data/local/tmp" # else # define TMP_DIR "/tmp" # endif /* Prefer /tmp to cwd as the former is more likely writable */ strncpy(dir, TMP_DIR, BUFFER_SIZE_ELEMENTS(dir)); NULL_TERMINATE_BUFFER(dir); if ((!file_exists(dir) || !is_config_dir_valid(dir)) && /* Prefer getcwd over PWD env var which is not always set * (e.g., on Android, it's in "adb shell" but not child) */ (getcwd(dir, BUFFER_SIZE_ELEMENTS(dir)) == NULL || !is_config_dir_valid(dir))) { # ifdef ANDROID /* Put back TMP_DIR for better error msg in caller */ strncpy(dir, TMP_DIR, BUFFER_SIZE_ELEMENTS(dir)); NULL_TERMINATE_BUFFER(dir); # endif goto get_config_dir_done; } # endif } } /* For anon config files (.0config32), we set DYNAMORIO_VAR_CONFIGDIR to be * either LOCAL_CONFIG_ENV or TMP to ensure DR finds the same config file! */ # ifdef WINDOWS { TCHAR wbuf[MAXIMUM_PATH]; convert_to_tchar(wbuf, dir, BUFFER_SIZE_ELEMENTS(wbuf)); NULL_TERMINATE_BUFFER(wbuf); if (!SetEnvironmentVariableW(L_DYNAMORIO_VAR_CONFIGDIR, wbuf)) goto get_config_dir_done; } # else if (setenv(DYNAMORIO_VAR_CONFIGDIR, dir, 1 /*replace*/) != 0) goto get_config_dir_done; # endif } subdir = LOCAL_CONFIG_SUBDIR; } res = true; get_config_dir_done: /* On failure, we still want to copy the last-tried dir out so drdeploy can have a * nicer error msg. 
*/ _snprintf(fname, fname_len, "%s/%s", dir, subdir); fname[fname_len - 1] = '\0'; return res; } /* No support yet here to create some types of files the core supports: * - system config dir by reading home reg key: plan is to * add a global setting to use that, so no change to params in API * - default0.config */ static bool get_config_file_name(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, char *fname, size_t fname_len) { size_t dir_len; # ifdef WINDOWS drfront_status_t res; # endif /* i#939: we can't fall back to tmp dirs here b/c it's too late to set * the DYNAMORIO_CONFIGDIR env var (child is already created). */ if (!get_config_dir(global, fname, fname_len, false)) { DO_ASSERT(false && "get_config_dir failed: check permissions"); return false; } # ifdef WINDOWS /* make sure subdir exists*/ res = drfront_create_dir(fname); if (res != DRFRONT_SUCCESS && res != DRFRONT_ERROR_FILE_EXISTS) { DO_ASSERT(false && "failed to create subdir: check permissions"); return false; } # else { struct stat st; /* DrMi#1857: with both native and wrapped Android apps using the same * config dir but running as different users, we need the dir to be * world-writable (this is when SELinux is disabled and a common config * dir is used). */ mkdir(fname, IF_ANDROID_ELSE(0777, 0770)); # ifdef ANDROID chmod(fname, 0777); /* umask probably stripped out o+w, so we chmod */ # endif if (stat(fname, &st) != 0 || !S_ISDIR(st.st_mode)) { DO_ASSERT(false && "failed to create subdir: check permissions"); return false; } } # endif dir_len = strlen(fname); if (pid > 0) { /* <root>/appname.<pid>.1config */ _snprintf(fname + dir_len, fname_len - dir_len, "/%s.%d.1%s", process_name, pid, get_config_sfx(dr_platform)); } else { /* <root>/appname.config */ _snprintf(fname + dir_len, fname_len - dir_len, "/%s.%s", process_name, get_config_sfx(dr_platform)); } fname[fname_len - 1] = '\0'; return true; } static FILE * open_config_file(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, bool read, bool write, bool overwrite) { TCHAR wfname[MAXIMUM_PATH]; char fname[MAXIMUM_PATH]; TCHAR mode[MAX_MODE_STRING_SIZE]; int i = 0; FILE *f; DO_ASSERT(read || write); DO_ASSERT(!(read && overwrite) && "read+overwrite incompatible"); if (!get_config_file_name(process_name, pid, global, dr_platform, fname, BUFFER_SIZE_ELEMENTS(fname))) { DO_ASSERT(false && "get_config_file_name failed"); return NULL; } /* XXX: Checking for existence before opening is racy. */ convert_to_tchar(wfname, fname, BUFFER_SIZE_ELEMENTS(wfname)); NULL_TERMINATE_BUFFER(wfname); if (!read && write && !overwrite && file_exists(wfname)) { return NULL; } /* Careful, Windows fopen aborts the process on invalid mode strings. * Order matters. Modifiers like 'b' have to come after standard characters * like r, w, and +. */ if (read) mode[i++] = _T('r'); if (write) mode[i++] = (read ? _T('+') : _T('w')); mode[i++] = _T('b'); /* Avoid CRLF translation on Windows. */ mode[i++] = _T('\0'); DO_ASSERT(i <= BUFFER_SIZE_ELEMENTS(mode)); NULL_TERMINATE_BUFFER(mode); f = _wfopen(wfname, mode); return f; } static void trim_trailing_newline(TCHAR *line) { TCHAR *cur = line + _tcslen(line) - 1; while (cur >= line && (*cur == _T('\n') || *cur == _T('\r'))) { *cur = _T('\0'); cur--; } } /* Copies the value for var, converted to a TCHAR, into val. 
If elide is true, * also overwrites var and its value in the file with all lines subsequent, * allowing for a simple append to change the value (the file must have been * opened with both read and write access). */ static bool read_config_ex(FILE *f, const char *var, TCHAR *val, size_t val_len, bool elide) { bool found = false; /* FIXME: share code w/ core/config.c */ # define BUFSIZE (MAX_CONFIG_VALUE + 128) char line[BUFSIZE]; size_t var_len = strlen(var); /* Offsets into the file for the start and end of var. */ size_t var_start = 0; size_t var_end = 0; /* each time we start from beginning: we assume a small file */ if (f == NULL) return false; if (fseek(f, 0, SEEK_SET)) return false; while (fgets(line, BUFFER_SIZE_ELEMENTS(line), f) != NULL) { fflush(stdout); /* Find lines starting with VAR=. */ if (strncmp(line, var, var_len) == 0 && line[var_len] == '=') { found = true; var_end = var_start + strlen(line); if (val != NULL) { convert_to_tchar(val, line + var_len + 1, val_len); val[val_len - 1] = _T('\0'); trim_trailing_newline(val); } break; } var_start += strlen(line); fflush(stdout); } /* If elide is true, seek back to the line, delete it, and shift the rest of * the file backward. It's easier to do this with a fixed size buffer than * it is to it line-by-line with fgets/fputs. */ if (found && elide) { /* Use long instead of ssize_t to match FILE* API. */ long write_cur = (long)var_start; long read_cur = (long)var_end; long bufread; fflush(stdout); while (true) { fseek(f, read_cur, SEEK_SET); bufread = (long)fread(line, 1, BUFFER_SIZE_ELEMENTS(line), f); DO_ASSERT(ferror(f) == 0); line[bufread] = '\0'; fflush(stdout); fseek(f, write_cur, SEEK_SET); if (bufread > 0) { fwrite(line, 1, bufread, f); DO_ASSERT(ferror(f) == 0); read_cur += bufread; write_cur += bufread; } else { break; } } # ifdef WINDOWS /* XXX: Can't find a way to truncate the file at the current position * using the FILE API. */ SetEndOfFile((HANDLE)_get_osfhandle(_fileno(f))); # else { int r; off_t pos = lseek(fileno(f), 0, SEEK_CUR); if (pos == -1) return false; r = ftruncate(fileno(f), (off_t)pos); if (r != 0) return false; } # endif } return found; } /* for simplest coexistence with PARAMS_IN_REGISTRY taking in TCHAR and * converting to char. not very efficient though. 
*/ static dr_config_status_t write_config_param(FILE *f, const char *var, const TCHAR *val) { size_t written; int len; char buf[MAX_CONFIG_VALUE]; DO_ASSERT(f != NULL); len = _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s=" TSTR_FMT "\n", var, val); /* don't remove the newline: better to truncate options than to have none (i#547) */ buf[BUFFER_SIZE_ELEMENTS(buf) - 2] = '\n'; buf[BUFFER_SIZE_ELEMENTS(buf) - 1] = '\0'; written = fwrite(buf, 1, strlen(buf), f); DO_ASSERT(written == strlen(buf)); if (len < 0) return DR_CONFIG_STRING_TOO_LONG; if (written != strlen(buf)) return DR_CONFIG_FILE_WRITE_FAILED; return DR_SUCCESS; } static bool read_config_param(FILE *f, const char *var, TCHAR *val, size_t val_len) { return read_config_ex(f, var, val, val_len, false); } #else /* !PARAMS_IN_REGISTRY */ static dr_config_status_t write_config_param(ConfigGroup *policy, const TCHAR *var, const TCHAR *val) { set_config_group_parameter(policy, var, val); return DR_SUCCESS; } static bool read_config_param(FILE *f, const char *var, const TCHAR *val, size_t val_len) { TCHAR *ptr = get_config_group_parameter(proc_policy, L_DYNAMORIO_VAR_OPTIONS); if (ptr == NULL) return false; _sntprintf(val, val_len, _TEXT("%s"), ptr); return true; } #endif /* PARAMS_IN_REGISTRY */ /***************************************************************************/ /* Read a DYNAMORIO_OPTIONS string from 'wbuf' and populate an opt_info_t struct */ static dr_config_status_t read_options(opt_info_t *opt_info, IF_REG_ELSE(ConfigGroup *proc_policy, FILE *f)) { TCHAR buf[MAX_CONFIG_VALUE]; TCHAR *ptr, token[DR_MAX_OPTIONS_LENGTH], tmp[DR_MAX_OPTIONS_LENGTH]; opt_info_t null_opt_info = { 0, }; size_t len; *opt_info = null_opt_info; if (!read_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_OPTIONS), buf, BUFFER_SIZE_ELEMENTS(buf))) return DR_FAILURE; ptr = buf; /* We'll be safe and not trust that get_config_group_parameter * returns a nice NULL-terminated string with no more than * DR_MAX_OPTIONS_LENGTH characters. Making sure we have such a * string makes get_next_token() simpler. A more efficient * approach would be to keep track of a string length and pass * that to get_next_token(). 
*/ len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(ptr)); _tcsncpy(tmp, ptr, len); tmp[len] = _T('\0'); opt_info->mode = DR_MODE_NONE; ptr = tmp; while (ptr != NULL) { ptr = get_next_token(ptr, token); /* * look for the mode */ if (_tcscmp(token, _TEXT("-code_api")) == 0) { if (opt_info->mode != DR_MODE_NONE && /* allow dup options (i#1448) */ opt_info->mode != DR_MODE_CODE_MANIPULATION) { goto error; } opt_info->mode = DR_MODE_CODE_MANIPULATION; } #ifdef MF_API else if (_tcscmp(token, _TEXT("-security_api")) == 0) { if (opt_info->mode != DR_MODE_NONE && opt_info->mode != DR_MODE_MEMORY_FIREWALL) { goto error; } opt_info->mode = DR_MODE_MEMORY_FIREWALL; } #endif else if (_tcscmp(token, _TEXT("-probe_api")) == 0) { #ifdef PROBE_API /* nothing; we assign the mode when we see -code_api */ #else /* we shouldn't see -probe_api */ goto error; #endif } #ifdef PROBE_API else if (_tcscmp(token, _TEXT("-hotp_only")) == 0) { if (opt_info->mode != DR_MODE_NONE && opt_info->mode != DR_MODE_PROBE) { goto error; } opt_info->mode = DR_MODE_PROBE; } #endif /* * look for client options */ else if (_tcscmp(token, _TEXT("-client_lib")) == 0) { TCHAR *path_str, *id_str, *opt_str; client_id_t id; ptr = get_next_token(ptr, token); if (ptr == NULL) { goto error; } /* handle enclosing quotes */ path_str = token; if (path_str[0] == _T('\"') || path_str[0] == _T('\'') || path_str[0] == _T('`')) { TCHAR quote = path_str[0]; size_t last; path_str++; last = _tcslen(path_str) - 1; if (path_str[last] != quote) { goto error; } path_str[last] = _T('\0'); } /* -client_lib options should have the form path;ID;options. * Client priority is left-to-right. */ id_str = _tcsstr(path_str, _TEXT(";")); if (id_str == NULL) { goto error; } *id_str = _T('\0'); id_str++; opt_str = _tcsstr(id_str, _TEXT(";")); if (opt_str == NULL) { goto error; } *opt_str = _T('\0'); opt_str++; /* client IDs are in hex */ id = _tcstoul(id_str, NULL, 16); /* add the client info to our opt_info structure */ if (add_client_lib(opt_info, id, opt_info->num_clients, path_str, opt_str) != DR_SUCCESS) { goto error; } } /* * Any remaining options are not related to clients. Put all * these options (and their arguments) in one array. */ else { if (add_extra_option(opt_info, token) != DR_SUCCESS) { goto error; } } } fflush(stdout); return DR_SUCCESS; error: free_opt_info(opt_info); *opt_info = null_opt_info; return DR_FAILURE; } /* Write the options stored in an opt_info_t to 'wbuf' in the form expected * by the DYNAMORIO_OPTIONS registry entry. */ static dr_config_status_t write_options(opt_info_t *opt_info, TCHAR *wbuf) { size_t i; const char *mode_str = ""; ssize_t len; ssize_t sofar = 0; ssize_t bufsz = DR_MAX_OPTIONS_LENGTH; /* NOTE - mode_str must come first since we want to give * client-supplied options the chance to override (for * ex. -stack_size which -code_api sets, see PR 247436 */ switch (opt_info->mode) { #ifdef MF_API case DR_MODE_MEMORY_FIREWALL: mode_str = "-security_api"; break; #endif case DR_MODE_CODE_MANIPULATION: #ifdef PROBE_API mode_str = "-code_api -probe_api"; #else mode_str = "-code_api"; #endif break; #ifdef PROBE_API case DR_MODE_PROBE: mode_str = "-probe_api -hotp_only"; break; #endif case DR_MODE_DO_NOT_RUN: /* this is a mode b/c can't add dr_register_process param w/o breaking * backward compat, so just ignore in terms of options, user has to * re-reg anyway to re-enable and can specify mode then. 
*/ mode_str = ""; break; default: #ifndef CLIENT_INTERFACE /* no API's so no added options */ mode_str = ""; break; #else DO_ASSERT(false); #endif } len = _sntprintf(wbuf + sofar, bufsz - sofar, _TEXT(TSTR_FMT), mode_str); if (len >= 0 && len <= bufsz - sofar) sofar += len; /* extra options */ for (i = 0; i < opt_info->num_extra_opts; i++) { /* FIXME: Note that we're blindly allowing any options * provided so we can allow users to specify "undocumented" * options. Maybe we should be checking that the options are * actually valid? */ len = _sntprintf(wbuf + sofar, bufsz - sofar, _TEXT(" %s"), opt_info->extra_opts[i]); if (len >= 0 && len <= bufsz - sofar) sofar += len; } /* client lib options */ for (i = 0; i < opt_info->num_clients; i++) { client_opt_t *client_opts = opt_info->client_opts[i]; /* i#1542: pick a delimiter that avoids conflicts w/ the client strings */ char delim = '\"'; if (strchr(client_opts->path, delim) || strchr(client_opts->opts, delim)) { delim = '\''; if (strchr(client_opts->path, delim) || strchr(client_opts->opts, delim)) { delim = '`'; if (strchr(client_opts->path, delim) || strchr(client_opts->opts, delim)) { return DR_CONFIG_OPTIONS_INVALID; } } } /* no ; allowed */ if (strchr(client_opts->path, ';') || strchr(client_opts->opts, ';')) return DR_CONFIG_OPTIONS_INVALID; len = _sntprintf(wbuf + sofar, bufsz - sofar, _TEXT(" -client_lib %c%s;%x;%s%c"), delim, client_opts->path, client_opts->id, client_opts->opts, delim); if (len >= 0 && len <= bufsz - sofar) sofar += len; } wbuf[DR_MAX_OPTIONS_LENGTH - 1] = _T('\0'); return DR_SUCCESS; } #ifdef PARAMS_IN_REGISTRY /* caller must call free_config_group() on the returned ConfigGroup */ static ConfigGroup * get_policy(dr_platform_t dr_platform) { ConfigGroup *policy; /* PR 244206: set the registry view before any registry access. * If we are ever part of a persistent agent maybe we should * restore to the default platform at the end of this routine? */ set_dr_platform(dr_platform); if (read_config_group(&policy, L_PRODUCT_NAME, TRUE) != ERROR_SUCCESS) { return NULL; } return policy; } /* As a sub policy only the parent policy (from get_policy()) need be freed */ static ConfigGroup * get_proc_policy(ConfigGroup *policy, const char *process_name) { ConfigGroup *res = NULL; if (policy != NULL) { TCHAR wbuf[MAXIMUM_PATH]; convert_to_tchar(wbuf, process_name, MAXIMUM_PATH); NULL_TERMINATE_BUFFER(wbuf); res = get_child(wbuf, policy); } return res; } #endif /* PARAMS_IN_REGISTRY */ static bool platform_is_64bit(dr_platform_t platform) { return (platform == DR_PLATFORM_64BIT IF_X64(|| platform == DR_PLATFORM_DEFAULT)); } /* FIXME i#840: Syswide NYI for Linux. 
*/ #ifdef WINDOWS static void get_syswide_path(TCHAR *wbuf, const char *dr_root_dir) { TCHAR path[MAXIMUM_PATH]; int len; if (!platform_is_64bit(get_dr_platform())) _sntprintf(path, MAXIMUM_PATH, _TEXT("%S") PREINJECT32_DLL, dr_root_dir); else _sntprintf(path, MAXIMUM_PATH, _TEXT("%S") PREINJECT64_DLL, dr_root_dir); path[MAXIMUM_PATH - 1] = '\0'; /* spaces are separator in AppInit so use short path */ len = GetShortPathName(path, wbuf, MAXIMUM_PATH); DO_ASSERT(len > 0); wbuf[MAXIMUM_PATH - 1] = _T('\0'); } dr_config_status_t dr_register_syswide(dr_platform_t dr_platform, const char *dr_root_dir) { TCHAR wbuf[MAXIMUM_PATH]; set_dr_platform(dr_platform); /* Set the appinit key */ get_syswide_path(wbuf, dr_root_dir); /* Always overwrite, in case we have an older drpreinject version in there */ if (set_custom_autoinjection(wbuf, APPINIT_OVERWRITE) != ERROR_SUCCESS || (is_vista() && set_loadappinit() != ERROR_SUCCESS)) { return DR_FAILURE; } return DR_SUCCESS; } dr_config_status_t dr_unregister_syswide(dr_platform_t dr_platform, const char *dr_root_dir) { TCHAR wbuf[MAXIMUM_PATH]; set_dr_platform(dr_platform); /* Set the appinit key */ get_syswide_path(wbuf, dr_root_dir); if (unset_custom_autoinjection(wbuf, APPINIT_OVERWRITE) != ERROR_SUCCESS) return DR_FAILURE; /* We leave Vista loadappinit on */ return DR_SUCCESS; } bool dr_syswide_is_on(dr_platform_t dr_platform, const char *dr_root_dir) { TCHAR wbuf[MAXIMUM_PATH]; set_dr_platform(dr_platform); /* Set the appinit key */ get_syswide_path(wbuf, dr_root_dir); return CAST_TO_bool(is_custom_autoinjection_set(wbuf)); } #endif /* WINDOWS */ dr_config_status_t dr_register_process(const char *process_name, process_id_t pid, bool global, const char *dr_root_dir, dr_operation_mode_t dr_mode, bool debug, dr_platform_t dr_platform, const char *dr_options) { #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy, *proc_policy; #else FILE *f; #endif TCHAR wbuf[MAX(MAXIMUM_PATH, DR_MAX_OPTIONS_LENGTH)]; IF_WINDOWS(DWORD platform;) opt_info_t opt_info = { 0, }; dr_config_status_t status; #ifdef PARAMS_IN_REGISTRY /* PR 244206: set the registry view before any registry access. * If we are ever part of a persistent agent maybe we should * restore to the default platform at the end of this routine? */ set_dr_platform(dr_platform); /* create top-level Determina/SecureCore key */ if (create_root_key() != ERROR_SUCCESS) { return DR_FAILURE; } if (read_config_group(&policy, L_PRODUCT_NAME, TRUE) != ERROR_SUCCESS) { return DR_FAILURE; } /* create process key */ convert_to_tchar(wbuf, process_name, MAXIMUM_PATH); NULL_TERMINATE_BUFFER(wbuf); proc_policy = get_child(wbuf, policy); if (proc_policy == NULL) { proc_policy = new_config_group(wbuf); add_config_group(policy, proc_policy); } else { return DR_PROC_REG_EXISTS; } #else f = open_config_file(process_name, pid, global, dr_platform, false /*!read*/, true /*write*/, pid != 0 /*overwrite for pid-specific*/); if (f == NULL) { # ifdef WINDOWS int err = GetLastError(); if (err == ERROR_ALREADY_EXISTS) return DR_PROC_REG_EXISTS; else # endif return DR_CONFIG_DIR_NOT_FOUND; } #endif /* set the rununder string */ _sntprintf(wbuf, MAXIMUM_PATH, (dr_mode == DR_MODE_DO_NOT_RUN) ? 
_TEXT("0") : _TEXT("1")); NULL_TERMINATE_BUFFER(wbuf); status = write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_RUNUNDER), wbuf); DO_ASSERT(status == DR_SUCCESS); if (status != DR_SUCCESS) return status; /* set the autoinject string (i.e., path to dynamorio.dll */ if (debug) { if (!platform_is_64bit(dr_platform)) _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) DEBUG32_DLL, dr_root_dir); else _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) DEBUG64_DLL, dr_root_dir); } else { if (!platform_is_64bit(dr_platform)) _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) RELEASE32_DLL, dr_root_dir); else _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) RELEASE64_DLL, dr_root_dir); } NULL_TERMINATE_BUFFER(wbuf); status = write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_AUTOINJECT), wbuf); DO_ASSERT(status == DR_SUCCESS); if (status != DR_SUCCESS) return status; /* set the logdir string */ /* XXX i#886: should we expose this in the dr_register_process() params (and * thus dr_process_is_registered() and dr_registered_process_iterator_next())? * We now have a -logdir runtime option so we don't need to expose it for full * functionality anymore but it would serve to reduce the length of option * strings to have more control over the default. Linux dr{config,run} does * allow such control today. */ _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) LOG_SUBDIR, dr_root_dir); NULL_TERMINATE_BUFFER(wbuf); status = write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_LOGDIR), wbuf); DO_ASSERT(status == DR_SUCCESS); if (status != DR_SUCCESS) return status; /* set the options string last for faster updating w/ config files */ opt_info.mode = dr_mode; add_extra_option_char(&opt_info, dr_options); status = write_options(&opt_info, wbuf); if (status != DR_SUCCESS) return status; status = write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_OPTIONS), wbuf); free_opt_info(&opt_info); DO_ASSERT(status == DR_SUCCESS); if (status != DR_SUCCESS) return status; #ifdef PARAMS_IN_REGISTRY /* write the registry */ if (write_config_group(policy) != ERROR_SUCCESS) { DO_ASSERT(false); return DR_FAILURE; } #else fclose(f); #endif #ifdef WINDOWS /* If on win2k, copy drearlyhelper?.dll to system32 * FIXME: this requires admin privs! oh well: only issue is early inject * on win2k... 
*/ if (get_platform(&platform) == ERROR_SUCCESS && platform == PLATFORM_WIN_2000) { _sntprintf(wbuf, MAXIMUM_PATH, _TEXT(TSTR_FMT) LIB32_SUBDIR, dr_root_dir); NULL_TERMINATE_BUFFER(wbuf); copy_earlyhelper_dlls(wbuf); } #endif return DR_SUCCESS; } dr_config_status_t dr_unregister_process(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform) { #ifndef PARAMS_IN_REGISTRY char fname[MAXIMUM_PATH]; if (get_config_file_name(process_name, pid, global, dr_platform, fname, BUFFER_SIZE_ELEMENTS(fname))) { TCHAR wbuf[MAXIMUM_PATH]; convert_to_tchar(wbuf, fname, BUFFER_SIZE_ELEMENTS(wbuf)); NULL_TERMINATE_BUFFER(wbuf); if (_wremove(wbuf) == 0) return DR_SUCCESS; } return DR_FAILURE; #else ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); TCHAR wbuf[MAXIMUM_PATH]; dr_config_status_t status = DR_SUCCESS; if (proc_policy == NULL) { status = DR_PROC_REG_INVALID; goto exit; } /* remove it */ convert_to_tchar(wbuf, process_name, BUFFER_SIZE_ELEMENTS(wbuf)); NULL_TERMINATE_BUFFER(wbuf); remove_child(wbuf, policy); policy->should_clear = TRUE; /* write the registry */ if (write_config_group(policy) != ERROR_SUCCESS) { status = DR_FAILURE; goto exit; } /* FIXME PR 232738: we should remove the drdearlyhelp?.dlls and preinject * from system32, and remove the base reg keys, if dr_unregister_process() * removes the last registered process. */ exit: if (policy != NULL) free_config_group(policy); return status; #endif } /* For !PARAMS_IN_REGISTRY, process_name is NOT filled in! */ static void read_process_policy(IF_REG_ELSE(ConfigGroup *proc_policy, FILE *f), char *process_name /* OUT */, char *dr_root_dir /* OUT */, dr_operation_mode_t *dr_mode /* OUT */, bool *debug /* OUT */, char *dr_options /* OUT */) { TCHAR autoinject[MAX_CONFIG_VALUE]; opt_info_t opt_info; if (dr_mode != NULL) *dr_mode = DR_MODE_NONE; if (dr_root_dir != NULL) *dr_root_dir = '\0'; if (dr_options != NULL) *dr_options = '\0'; #ifdef PARAMS_IN_REGISTRY if (process_name != NULL) *process_name = '\0'; if (process_name != NULL && proc_policy->name != NULL) { SIZE_T len = MIN(_tcslen(proc_policy->name), MAXIMUM_PATH - 1); _snprintf(process_name, len, TSTR_FMT, proc_policy->name); process_name[len] = '\0'; } #else /* up to caller to fill in! 
*/ #endif if (dr_root_dir != NULL && read_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_AUTOINJECT), autoinject, BUFFER_SIZE_ELEMENTS(autoinject))) { TCHAR *vers = _tcsstr(autoinject, RELEASE32_DLL); if (vers == NULL) { vers = _tcsstr(autoinject, DEBUG32_DLL); } if (vers == NULL) { vers = _tcsstr(autoinject, RELEASE64_DLL); } if (vers == NULL) { vers = _tcsstr(autoinject, DEBUG64_DLL); } if (vers != NULL) { size_t len = MIN(MAXIMUM_PATH - 1, vers - autoinject); _snprintf(dr_root_dir, len, TSTR_FMT, autoinject); dr_root_dir[len] = '\0'; } else { dr_root_dir[0] = '\0'; } } if (read_options(&opt_info, IF_REG_ELSE(proc_policy, f)) != DR_SUCCESS) { /* note: read_options() frees any memory it allocates if it fails */ return; } if (dr_mode != NULL) { *dr_mode = opt_info.mode; if (read_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_RUNUNDER), autoinject, BUFFER_SIZE_ELEMENTS(autoinject))) { if (_tcscmp(autoinject, _TEXT("0")) == 0) *dr_mode = DR_MODE_DO_NOT_RUN; } } if (debug != NULL) { if (_tcsstr(autoinject, DEBUG32_DLL) != NULL || _tcsstr(autoinject, DEBUG64_DLL) != NULL) { *debug = true; } else { *debug = false; } } if (dr_options != NULL) { uint i; size_t len_remain = DR_MAX_OPTIONS_LENGTH - 1, cur_off = 0; dr_options[0] = '\0'; for (i = 0; i < opt_info.num_extra_opts; i++) { size_t len; if (i > 0 && len_remain > 0) { len_remain--; dr_options[cur_off++] = ' '; } len = MIN(len_remain, _tcslen(opt_info.extra_opts[i])); _snprintf(dr_options + cur_off, len, TSTR_FMT, opt_info.extra_opts[i]); cur_off += len; len_remain -= len; dr_options[cur_off] = '\0'; } } free_opt_info(&opt_info); } /* FIXME i#840: NYI for Linux, need a FindFirstFile equivalent. */ #ifdef WINDOWS struct _dr_registered_process_iterator_t { # ifdef PARAMS_IN_REGISTRY ConfigGroup *policy; ConfigGroup *cur; # else bool has_next; HANDLE find_handle; /* because UNICODE is defined we have to use the wide version of * FindFirstFile and convert back and forth */ WIN32_FIND_DATA find_data; /* FindFirstFile only fills in the basename */ TCHAR wdir[MAXIMUM_PATH]; TCHAR wfname[MAXIMUM_PATH]; # endif }; dr_registered_process_iterator_t * dr_registered_process_iterator_start(dr_platform_t dr_platform, bool global) { dr_registered_process_iterator_t *iter = (dr_registered_process_iterator_t *)malloc( sizeof(dr_registered_process_iterator_t)); # ifdef PARAMS_IN_REGISTRY iter->policy = get_policy(dr_platform); if (iter->policy != NULL) iter->cur = iter->policy->children; else iter->cur = NULL; # else char dir[MAXIMUM_PATH]; if (!get_config_dir(global, dir, BUFFER_SIZE_ELEMENTS(dir), false)) { iter->has_next = false; return iter; } convert_to_tchar(iter->wdir, dir, BUFFER_SIZE_ELEMENTS(iter->wdir)); NULL_TERMINATE_BUFFER(iter->wdir); _sntprintf(iter->wfname, BUFFER_SIZE_ELEMENTS(iter->wfname), _TEXT("%s/*.%S"), iter->wdir, get_config_sfx(dr_platform)); NULL_TERMINATE_BUFFER(iter->wfname); iter->find_handle = FindFirstFile(iter->wfname, &iter->find_data); iter->has_next = (iter->find_handle != INVALID_HANDLE_VALUE); # endif return iter; } bool dr_registered_process_iterator_hasnext(dr_registered_process_iterator_t *iter) { # ifdef PARAMS_IN_REGISTRY return iter->policy != NULL && iter->cur != NULL; # else return iter->has_next; # endif } bool dr_registered_process_iterator_next(dr_registered_process_iterator_t *iter, char *process_name /* OUT */, char *dr_root_dir /* OUT */, dr_operation_mode_t *dr_mode /* OUT */, bool *debug /* OUT */, char *dr_options /* OUT */) { # ifdef PARAMS_IN_REGISTRY 
read_process_policy(iter->cur, process_name, dr_root_dir, dr_mode, debug, dr_options); iter->cur = iter->cur->next; return true; # else bool ok = true; FILE *f; _sntprintf(iter->wfname, BUFFER_SIZE_ELEMENTS(iter->wfname), _TEXT("%s/%s"), iter->wdir, iter->find_data.cFileName); NULL_TERMINATE_BUFFER(iter->wfname); f = _wfopen(iter->wfname, L"r"); if (process_name != NULL) { TCHAR *end; end = _tcsstr(iter->find_data.cFileName, _TEXT(".config")); if (end == NULL) { process_name[0] = '\0'; ok = false; } else { # ifdef UNICODE _snprintf(process_name, end - iter->find_data.cFileName, "%S", iter->find_data.cFileName); # else _snprintf(process_name, end - iter->find_data.cFileName, "%s", iter->find_data.cFileName); # endif } } if (!FindNextFile(iter->find_handle, &iter->find_data)) iter->has_next = false; if (f == NULL || !ok) return false; read_process_policy(f, process_name, dr_root_dir, dr_mode, debug, dr_options); fclose(f); return true; # endif } void dr_registered_process_iterator_stop(dr_registered_process_iterator_t *iter) { # ifdef PARAMS_IN_REGISTRY if (iter->policy != NULL) free_config_group(iter->policy); # else if (iter->find_handle != INVALID_HANDLE_VALUE) FindClose(iter->find_handle); # endif free(iter); } #endif /* WINDOWS */ bool dr_process_is_registered(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform /* OUT */, char *dr_root_dir /* OUT */, dr_operation_mode_t *dr_mode /* OUT */, bool *debug /* OUT */, char *dr_options /* OUT */) { bool result = false; #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, false /*!write*/, false /*!overwrite*/); #endif if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) goto exit; result = true; read_process_policy(IF_REG_ELSE(proc_policy, f), NULL, dr_root_dir, dr_mode, debug, dr_options); exit: #ifdef PARAMS_IN_REGISTRY if (policy != NULL) free_config_group(policy); #else if (f != NULL) fclose(f); #endif return result; } struct _dr_client_iterator_t { opt_info_t opt_info; uint cur; bool valid; }; dr_client_iterator_t * dr_client_iterator_start(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform) { #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, false /*!write*/, false /*!overwrite*/); #endif dr_client_iterator_t *iter = (dr_client_iterator_t *)malloc(sizeof(dr_client_iterator_t)); iter->valid = false; iter->cur = 0; if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) return iter; if (read_options(&iter->opt_info, IF_REG_ELSE(proc_policy, f)) != DR_SUCCESS) return iter; iter->valid = true; return iter; } bool dr_client_iterator_hasnext(dr_client_iterator_t *iter) { return iter->valid && iter->cur < iter->opt_info.num_clients; } void dr_client_iterator_next(dr_client_iterator_t *iter, client_id_t *client_id, /* OUT */ size_t *client_pri, /* OUT */ char *client_path, /* OUT */ char *client_options /* OUT */) { client_opt_t *client_opt = iter->opt_info.client_opts[iter->cur]; if (client_pri != NULL) *client_pri = iter->cur; if (client_path != NULL) { size_t len = MIN(MAXIMUM_PATH - 1, _tcslen(client_opt->path)); _snprintf(client_path, len, TSTR_FMT, client_opt->path); client_path[len] = '\0'; } if (client_id != NULL) *client_id = client_opt->id; 
if (client_options != NULL) { size_t len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(client_opt->opts)); _snprintf(client_options, len, TSTR_FMT, client_opt->opts); client_options[len] = '\0'; } iter->cur++; } void dr_client_iterator_stop(dr_client_iterator_t *iter) { if (iter->valid) free_opt_info(&iter->opt_info); free(iter); } size_t dr_num_registered_clients(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform) { opt_info_t opt_info; size_t num = 0; #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, false /*!write*/, false /*!overwrite*/); #endif if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) goto exit; if (read_options(&opt_info, IF_REG_ELSE(proc_policy, f)) != DR_SUCCESS) goto exit; num = opt_info.num_clients; free_opt_info(&opt_info); exit: #ifdef PARAMS_IN_REGISTRY if (policy != NULL) free_config_group(policy); #else fclose(f); #endif return num; } dr_config_status_t dr_get_client_info(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, client_id_t client_id, size_t *client_pri, char *client_path, char *client_options) { opt_info_t opt_info; dr_config_status_t status; size_t i; #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, false /*!write*/, false /*!overwrite*/); #endif if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) { status = DR_PROC_REG_INVALID; goto exit; } status = read_options(&opt_info, IF_REG_ELSE(proc_policy, f)); if (status != DR_SUCCESS) goto exit; for (i = 0; i < opt_info.num_clients; i++) { if (opt_info.client_opts[i]->id == client_id) { client_opt_t *client_opt = opt_info.client_opts[i]; if (client_pri != NULL) { *client_pri = i; } if (client_path != NULL) { size_t len = MIN(MAXIMUM_PATH - 1, _tcslen(client_opt->path)); _snprintf(client_path, len, TSTR_FMT, client_opt->path); client_path[len] = '\0'; } if (client_options != NULL) { size_t len = MIN(DR_MAX_OPTIONS_LENGTH - 1, _tcslen(client_opt->opts)); _snprintf(client_options, len, TSTR_FMT, client_opt->opts); client_options[len] = '\0'; } status = DR_SUCCESS; goto exit; } } status = DR_ID_INVALID; exit: #ifdef PARAMS_IN_REGISTRY if (policy != NULL) free_config_group(policy); #else fclose(f); #endif return status; } dr_config_status_t dr_register_client(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, client_id_t client_id, size_t client_pri, const char *client_path, const char *client_options) { TCHAR new_opts[DR_MAX_OPTIONS_LENGTH]; TCHAR wpath[MAXIMUM_PATH], woptions[DR_MAX_OPTIONS_LENGTH]; #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, true /*write*/, false /*!overwrite*/); #endif dr_config_status_t status; opt_info_t opt_info; bool opt_info_alloc = false; size_t i; if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) { status = DR_PROC_REG_INVALID; goto exit; } status = read_options(&opt_info, IF_REG_ELSE(proc_policy, f)); if (status != DR_SUCCESS) { goto exit; } opt_info_alloc = true; for (i = 0; i < opt_info.num_clients; i++) { if (opt_info.client_opts[i]->id == client_id) { status = 
DR_ID_CONFLICTING; goto exit; } } if (client_pri > opt_info.num_clients) { status = DR_PRIORITY_INVALID; goto exit; } convert_to_tchar(wpath, client_path, MAXIMUM_PATH); NULL_TERMINATE_BUFFER(wpath); convert_to_tchar(woptions, client_options, DR_MAX_OPTIONS_LENGTH); NULL_TERMINATE_BUFFER(woptions); status = add_client_lib(&opt_info, client_id, client_pri, wpath, woptions); if (status != DR_SUCCESS) { goto exit; } /* write the registry */ status = write_options(&opt_info, new_opts); if (status != DR_SUCCESS) goto exit; #ifndef PARAMS_IN_REGISTRY /* shift rest of file up, overwriting old value, so we can append new value */ read_config_ex(f, DYNAMORIO_VAR_OPTIONS, NULL, 0, true); #endif status = write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_OPTIONS), new_opts); if (status != DR_SUCCESS) goto exit; #ifdef PARAMS_IN_REGISTRY if (write_config_group(policy) != ERROR_SUCCESS) { status = DR_FAILURE; goto exit; } #endif status = DR_SUCCESS; exit: #ifdef PARAMS_IN_REGISTRY if (policy != NULL) free_config_group(policy); #else if (f != NULL) fclose(f); #endif if (opt_info_alloc) free_opt_info(&opt_info); return status; } dr_config_status_t dr_unregister_client(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, client_id_t client_id) { TCHAR new_opts[DR_MAX_OPTIONS_LENGTH]; #ifdef PARAMS_IN_REGISTRY ConfigGroup *policy = get_policy(dr_platform); ConfigGroup *proc_policy = get_proc_policy(policy, process_name); #else FILE *f = open_config_file(process_name, pid, global, dr_platform, true /*read*/, true /*write*/, false /*!overwrite*/); #endif dr_config_status_t status; opt_info_t opt_info; bool opt_info_alloc = false; if (IF_REG_ELSE(proc_policy == NULL, f == NULL)) { status = DR_PROC_REG_INVALID; goto exit; } status = read_options(&opt_info, IF_REG_ELSE(proc_policy, f)); if (status != DR_SUCCESS) { goto exit; } opt_info_alloc = true; status = remove_client_lib(&opt_info, client_id); if (status != DR_SUCCESS) { goto exit; } /* write the registry */ write_options(&opt_info, new_opts); #ifndef PARAMS_IN_REGISTRY /* shift rest of file up, overwriting old value, so we can append new value */ read_config_ex(f, DYNAMORIO_VAR_OPTIONS, NULL, 0, true); #endif write_config_param(IF_REG_ELSE(proc_policy, f), PARAM_STR(DYNAMORIO_VAR_OPTIONS), new_opts); #ifdef PARAMS_IN_REGISTRY if (write_config_group(policy) != ERROR_SUCCESS) { status = DR_FAILURE; goto exit; } #endif status = DR_SUCCESS; exit: #ifdef PARAMS_IN_REGISTRY if (policy != NULL) free_config_group(policy); #else if (f != NULL) fclose(f); #endif if (opt_info_alloc) free_opt_info(&opt_info); return status; } #ifdef WINDOWS typedef struct { const char *process_name; /* if non-null nudges processes with matching name */ bool all; /* if set attempts to nudge all processes */ client_id_t client_id; uint64 argument; int count; /* number of nudges successfully delivered */ DWORD res; /* last failing error code */ DWORD timeout; /* amount of time to wait for nudge to finish */ } pw_nudge_callback_data_t; static BOOL pw_nudge_callback(process_info_t *pi, void **param) { char buf[MAXIMUM_PATH]; pw_nudge_callback_data_t *data = (pw_nudge_callback_data_t *)param; if (pi->ProcessID == 0) return true; /* skip system process */ buf[0] = '\0'; if (pi->ProcessName != NULL) _snprintf(buf, MAXIMUM_PATH, "%S", pi->ProcessName); NULL_TERMINATE_BUFFER(buf); if (data->all || (data->process_name != NULL && strnicmp(data->process_name, buf, MAXIMUM_PATH) == 0)) { DWORD res = generic_nudge(pi->ProcessID, true, 
NUDGE_GENERIC(client), data->client_id, data->argument, data->timeout); if (res == ERROR_SUCCESS || res == ERROR_TIMEOUT) { data->count++; if (res == ERROR_TIMEOUT && data->timeout != 0) data->res = ERROR_TIMEOUT; } else if (res != ERROR_MOD_NOT_FOUND /* so failed for a good reason */) data->res = res; } return true; } /* TODO: must be careful in invoking the correct VIPA's nudge handler, * particularly a problem with multiple agents, but can be a problem even in a * single agent if some other dll exports dr_nudge_handler() (remote * contingency). */ dr_config_status_t dr_nudge_process(const char *process_name, client_id_t client_id, uint64 arg, uint timeout_ms, int *nudge_count /*OUT */) { pw_nudge_callback_data_t data = { 0 }; data.process_name = process_name; data.client_id = client_id; data.argument = arg; data.timeout = timeout_ms; data.res = ERROR_SUCCESS; process_walk(&pw_nudge_callback, (void **)&data); if (nudge_count != NULL) *nudge_count = data.count; if (data.res == ERROR_SUCCESS) return DR_SUCCESS; else if (data.res == ERROR_TIMEOUT) return DR_NUDGE_TIMEOUT; else return DR_FAILURE; } dr_config_status_t dr_nudge_pid(process_id_t process_id, client_id_t client_id, uint64 arg, uint timeout_ms) { DWORD res = generic_nudge(process_id, true, NUDGE_GENERIC(client), client_id, arg, timeout_ms); if (res == ERROR_SUCCESS) return DR_SUCCESS; if (res == ERROR_MOD_NOT_FOUND) return DR_NUDGE_PID_NOT_INJECTED; if (res == ERROR_TIMEOUT && timeout_ms != 0) return DR_NUDGE_TIMEOUT; return DR_FAILURE; } dr_config_status_t dr_nudge_all(client_id_t client_id, uint64 arg, uint timeout_ms, int *nudge_count /*OUT*/) { pw_nudge_callback_data_t data = { 0 }; data.all = true; data.client_id = client_id; data.argument = arg; data.timeout = timeout_ms; data.res = ERROR_SUCCESS; process_walk(&pw_nudge_callback, (void **)&data); if (nudge_count != NULL) *nudge_count = data.count; if (data.res == ERROR_SUCCESS) return DR_SUCCESS; if (data.res == ERROR_TIMEOUT) return DR_NUDGE_TIMEOUT; return DR_FAILURE; } #elif defined LINUX dr_config_status_t dr_nudge_pid(process_id_t process_id, client_id_t client_id, uint64 arg, uint timeout_ms) { siginfo_t info; int res; /* construct the payload */ if (!create_nudge_signal_payload(&info, NUDGE_GENERIC(client), client_id, arg)) return DR_FAILURE; /* send the nudge */ res = syscall(SYS_rt_sigqueueinfo, process_id, NUDGESIG_SIGNUM, &info); if (res < 0) return DR_FAILURE; return DR_SUCCESS; } #endif /* WINDOWS */ /* XXX: perhaps we should take in a config dir as a parameter to all * of the registration routines in the drconfiglib API rather than or * in addition to having this env var DYNAMORIO_CONFIGLIB. * Xref i#939. */ dr_config_status_t dr_get_config_dir(bool global, bool alternative_local, char *config_dir /* OUT */, size_t config_dir_sz) { if (get_config_dir(global, config_dir, config_dir_sz, alternative_local)) { /* XXX: it would be nice to return DR_CONFIG_STRING_TOO_LONG if the * buffer is too small, rather than just truncating it */ return DR_SUCCESS; } else return DR_CONFIG_DIR_NOT_FOUND; }
1
15,688
See below: let's use the existing convention `_tcsnlen`.
DynamoRIO-dynamorio
c
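The DynamoRIO review above is pointing at the MIN(MAXIMUM_PATH - 1, _tcslen(...)) copies in dr_client_iterator_next() and dr_get_client_info(). A minimal sketch of the suggested form, assuming the usual tchar.h behavior of _tcsnlen (it maps to strnlen/wcsnlen and scans at most its second argument's worth of characters); the client_opt/client_path names come from the file above:

/* Bounded length query replaces the unbounded _tcslen wrapped in MIN(): */
size_t len = _tcsnlen(client_opt->path, MAXIMUM_PATH - 1);
_snprintf(client_path, len, TSTR_FMT, client_opt->path);
client_path[len] = '\0';

The same substitution would apply to the DR_MAX_OPTIONS_LENGTH copies over client_opt->opts.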
@@ -6,10 +6,6 @@ describe Step do describe "Validations" do it { should validate_presence_of(:proposal) } - it do - create(:approval_step) # needed for spec, see https://github.com/thoughtbot/shoulda-matchers/issues/194 - should validate_uniqueness_of(:user_id).scoped_to(:proposal_id) - end end let(:approval) { create(:approval_step) }
1
describe Step do describe "Associations" do it { should belong_to(:user) } it { should belong_to(:proposal) } end describe "Validations" do it { should validate_presence_of(:proposal) } it do create(:approval_step) # needed for spec, see https://github.com/thoughtbot/shoulda-matchers/issues/194 should validate_uniqueness_of(:user_id).scoped_to(:proposal_id) end end let(:approval) { create(:approval_step) } describe '#api_token' do let!(:token) { create(:api_token, step: approval) } it "returns the token" do expect(approval.api_token).to eq(token) end it "returns nil if the token's been used" do token.update_attribute(:used_at, 1.day.ago) approval.reload expect(approval.api_token).to eq(nil) end it "returns nil if the token's expired" do token.expire! approval.reload expect(approval.api_token).to eq(nil) end end describe '#completed_at' do it 'is nil when pending' do expect(approval.completed_at).to be_nil end it 'is nil when actionable' do approval.initialize! expect(approval.completed_at).to be_nil end it 'is set when approved' do approval.initialize! approval.complete! expect(approval.completed_at).not_to be_nil approval.reload expect(approval.completed_at).not_to be_nil end end describe '#on_completed_entry' do it "notified the proposal if the root gets completed" do expect(approval.proposal).to receive(:complete!).once approval.initialize! approval.complete! end it "does not notify the proposal if a child gets completed" do proposal = create(:proposal) child1 = build(:approval_step, user: create(:user)) child2 = build(:approval_step, user: create(:user)) proposal.root_step = build(:parallel_step, child_steps: [child1, child2]) expect(proposal).not_to receive(:complete!) child1.complete! end end describe "database constraints" do it "deletes steps when parent proposal is destroyed" do proposal = create(:proposal) step = create(:step, proposal: proposal) expect(Step.exists?(step.id)).to eq true expect(Proposal.exists?(proposal.id)).to eq true proposal.destroy expect(Step.exists?(step.id)).to eq false expect(Proposal.exists?(proposal.id)).to eq false end end describe "complicated approval chains" do # Approval hierarchy version of needing *two* of the following: # 1) Amy AND Bob # 2) Carrie # 3) Dan THEN Erin let!(:amy) { create(:user) } let!(:bob) { create(:user) } let!(:carrie) { create(:user) } let!(:dan) { create(:user) } let!(:erin) { create(:user) } let!(:proposal) { create(:proposal) } before :each do allow(DispatchFinder).to receive(:run).with(proposal).and_return( double(step_complete: true) ) # @todo syntax for this will get cleaned up and_clause = create(:parallel_step, child_steps: [ create(:approval_step, user: amy), create(:approval_step, user: bob) ]) then_clause = create(:serial_step, child_steps: [ create(:approval_step, user: dan), create(:approval_step, user: erin) ]) proposal.root_step = create(:parallel_step, min_children_needed: 2, child_steps: [ and_clause, create(:approval_step, user: carrie), then_clause ]) end it "won't approve Amy and Bob -- needs two branches of the OR" do build_approvals expect_any_instance_of(Proposal).not_to receive(:complete!) proposal.existing_or_delegated_step_for(amy).complete! proposal.existing_or_delegated_step_for(bob).complete! end it "will approve if Amy, Bob, and Carrie approve -- two branches of the OR" do build_approvals expect_any_instance_of(Proposal).to receive(:complete!) proposal.existing_or_delegated_step_for(amy).complete! proposal.existing_or_delegated_step_for(bob).complete! 
proposal.existing_or_delegated_step_for(carrie).complete! end it "won't approve Amy, Bob, Dan as Erin is also required (to complete the THEN)" do build_approvals expect_any_instance_of(Proposal).not_to receive(:complete!) proposal.existing_or_delegated_step_for(amy).complete! proposal.existing_or_delegated_step_for(bob).complete! proposal.existing_or_delegated_step_for(dan).complete! end it "will approve Amy, Bob, Dan, Erin -- two branches of the OR" do build_approvals expect_any_instance_of(Proposal).to receive(:complete!) proposal.existing_or_delegated_step_for(amy).complete! proposal.existing_or_delegated_step_for(bob).complete! proposal.existing_or_delegated_step_for(dan).complete! proposal.existing_or_delegated_step_for(erin).complete! end it "will approve Amy, Bob, Dan, Carrie -- two branches of the OR as Dan is irrelevant" do build_approvals expect_any_instance_of(Proposal).to receive(:complete!) proposal.existing_or_delegated_step_for(amy).complete! proposal.existing_or_delegated_step_for(bob).complete! proposal.existing_or_delegated_step_for(dan).complete! proposal.existing_or_delegated_step_for(carrie).complete! end def build_approvals and_clause = build( :parallel_step, child_steps: [ build(:approval_step, user: amy), build(:approval_step, user: bob) ] ) then_clause = build( :parallel_step, child_steps: [ build(:approval_step, user: dan), build(:approval_step, user: erin) ] ) proposal.root_step = build( :parallel_step, min_children_needed: 2, child_steps: [ and_clause, build(:approval_step, user: carrie), then_clause ] ) end end end
1
17,725
Why don't we need this any more?
18F-C2
rb
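For context on the question above: the deleted shoulda-matchers block asserted a uniqueness validation scoped to the proposal. A hypothetical Step model that both the kept and the removed expectations would pass against (whether the real model still declares the uniqueness line is exactly what the reviewer is asking):

# Illustration only; this class body is an assumption, not the project's code.
class Step < ActiveRecord::Base
  belongs_to :user
  belongs_to :proposal
  validates :proposal, presence: true
  # The removed spec exercised this declaration:
  validates :user_id, uniqueness: { scope: :proposal_id }
end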
@@ -0,0 +1,14 @@ +_base_ = ['./sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'] + +model = dict( + pretrained='torchvision://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), +)
1
1
21,817
Clean up the unnecessary trailing comma.
open-mmlab-mmdetection
py
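A sketch of the cleanup the comment asks for, assuming it refers to the trailing comma that leaves the outer dict( closing on a line of its own; everything else is the patch verbatim:

_base_ = ['./sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py']

model = dict(
    pretrained='torchvision://resnet101',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'))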
@@ -75,11 +75,14 @@ export function createVNode(type, props, key, ref, original) { // a _nextDom that has been set to `null` _nextDom: undefined, _component: null, - constructor: undefined, - _original: original + constructor: undefined }; - if (original == null) vnode._original = vnode; + Object.defineProperty(vnode, '_original', { + value: original || vnode, + enumerable: false, + writable: true + }); if (options.vnode) options.vnode(vnode); return vnode;
1
import options from './options'; /** * Create an virtual node (used for JSX) * @param {import('./internal').VNode["type"]} type The node name or Component * constructor for this virtual node * @param {object | null | undefined} [props] The properties of the virtual node * @param {Array<import('.').ComponentChildren>} [children] The children of the virtual node * @returns {import('./internal').VNode} */ export function createElement(type, props, children) { let normalizedProps = {}, i; for (i in props) { if (i !== 'key' && i !== 'ref') normalizedProps[i] = props[i]; } if (arguments.length > 3) { children = [children]; // https://github.com/preactjs/preact/issues/1916 for (i = 3; i < arguments.length; i++) { children.push(arguments[i]); } } if (children != null) { normalizedProps.children = children; } // If a Component VNode, check for and apply defaultProps // Note: type may be undefined in development, must never error here. if (typeof type == 'function' && type.defaultProps != null) { for (i in type.defaultProps) { if (normalizedProps[i] === undefined) { normalizedProps[i] = type.defaultProps[i]; } } } return createVNode( type, normalizedProps, props && props.key, props && props.ref, null ); } /** * Create a VNode (used internally by Preact) * @param {import('./internal').VNode["type"]} type The node name or Component * Constructor for this virtual node * @param {object | string | number | null} props The properties of this virtual node. * If this virtual node represents a text node, this is the text of the node (string or number). * @param {string | number | null} key The key for this virtual node, used when * diffing it against its children * @param {import('./internal').VNode["ref"]} ref The ref property that will * receive a reference to its created child * @returns {import('./internal').VNode} */ export function createVNode(type, props, key, ref, original) { // V8 seems to be better at detecting type shapes if the object is allocated from the same call site // Do not inline into createElement and coerceToVNode! const vnode = { type, props, key, ref, _children: null, _parent: null, _depth: 0, _dom: null, // _nextDom must be initialized to undefined b/c it will eventually // be set to dom.nextSibling which can return `null` and it is important // to be able to distinguish between an uninitialized _nextDom and // a _nextDom that has been set to `null` _nextDom: undefined, _component: null, constructor: undefined, _original: original }; if (original == null) vnode._original = vnode; if (options.vnode) options.vnode(vnode); return vnode; } export function createRef() { return {}; } export function Fragment(props) { return props.children; } /** * Check if a the argument is a valid Preact VNode. * @param {*} vnode * @returns {vnode is import('./internal').VNode} */ export const isValidElement = vnode => vnode != null && vnode.constructor === undefined;
1
15,589
This is the default value for enumerable.
preactjs-preact
js
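The comment above relies on the Object.defineProperty defaults; a short check anyone can run in node shows that an omitted enumerable (or configurable) already comes out false, so spelling out `enumerable: false` in the patch adds nothing:

const o = {};
Object.defineProperty(o, '_original', { value: 42, writable: true });
console.log(Object.getOwnPropertyDescriptor(o, '_original'));
// -> { value: 42, writable: true, enumerable: false, configurable: false }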
@@ -61,8 +61,8 @@ std::string chemicalReactionTemplatesToString( bool toSmiles, bool canonical) { std::string res = ""; std::vector<std::string> vfragsmi; - RDKit::MOL_SPTR_VECT::const_iterator begin = getStartIterator(rxn, type); - RDKit::MOL_SPTR_VECT::const_iterator end = getEndIterator(rxn, type); + auto begin = getStartIterator(rxn, type); + auto end = getEndIterator(rxn, type); for (; begin != end; ++begin) { vfragsmi.push_back(molToString(**begin, toSmiles)); }
1
// $Id$ // // Copyright (c) 2010-2014, Novartis Institutes for BioMedical Research Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Novartis Institutes for BioMedical Research Inc. // nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <GraphMol/ChemReactions/Reaction.h> #include <GraphMol/ChemReactions/ReactionParser.h> #include <GraphMol/ChemReactions/ReactionUtils.h> #include <GraphMol/FileParsers/FileParsers.h> #include <GraphMol/SmilesParse/SmartsWrite.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include <sstream> namespace { void setRXNRoleOfAllMoleculeAtoms(RDKit::ROMol &mol, int role) { RDKit::ROMol::ATOM_ITER_PAIR atItP = mol.getVertices(); while (atItP.first != atItP.second) { RDKit::Atom *oAtom = mol[*(atItP.first++)].get(); oAtom->setProp(RDKit::common_properties::molRxnRole, role); } } std::string molToString(RDKit::ROMol &mol, bool toSmiles) { if (toSmiles) { return MolToSmiles(mol, true); } return MolToSmarts(mol, true); } std::string chemicalReactionTemplatesToString( const RDKit::ChemicalReaction &rxn, RDKit::ReactionMoleculeType type, bool toSmiles, bool canonical) { std::string res = ""; std::vector<std::string> vfragsmi; RDKit::MOL_SPTR_VECT::const_iterator begin = getStartIterator(rxn, type); RDKit::MOL_SPTR_VECT::const_iterator end = getEndIterator(rxn, type); for (; begin != end; ++begin) { vfragsmi.push_back(molToString(**begin, toSmiles)); } if (canonical) { std::sort(vfragsmi.begin(), vfragsmi.end()); } for (unsigned i = 0; i < vfragsmi.size(); ++i) { res += vfragsmi[i]; if (i < vfragsmi.size() - 1) { res += "."; } } return res; } std::string chemicalReactionToRxnToString(const RDKit::ChemicalReaction &rxn, bool toSmiles, bool canonical) { std::string res = ""; res += chemicalReactionTemplatesToString(rxn, RDKit::Reactant, toSmiles, canonical); res += ">"; res += chemicalReactionTemplatesToString(rxn, RDKit::Agent, toSmiles, canonical); res += ">"; res += chemicalReactionTemplatesToString(rxn, RDKit::Product, toSmiles, canonical); return res; } } namespace RDKit { //! 
returns the reaction SMARTS for a reaction std::string ChemicalReactionToRxnSmarts(const ChemicalReaction &rxn) { return chemicalReactionToRxnToString(rxn, false, false); }; //! returns the reaction SMILES for a reaction std::string ChemicalReactionToRxnSmiles(const ChemicalReaction &rxn, bool canonical) { return chemicalReactionToRxnToString(rxn, true, canonical); }; #if 1 //! returns an RXN block for a reaction std::string ChemicalReactionToRxnBlock(const ChemicalReaction &rxn, bool separateAgents) { std::ostringstream res; res << "$RXN\n\n RDKit\n\n"; if (separateAgents) { res << std::setw(3) << rxn.getNumReactantTemplates() << std::setw(3) << rxn.getNumProductTemplates() << std::setw(3) << rxn.getNumAgentTemplates() << "\n"; } else { res << std::setw(3) << (rxn.getNumReactantTemplates() + rxn.getNumAgentTemplates()) << std::setw(3) << rxn.getNumProductTemplates() << "\n"; } for (MOL_SPTR_VECT::const_iterator iter = rxn.beginReactantTemplates(); iter != rxn.endReactantTemplates(); ++iter) { // to write the mol block, we need ring information: MolOps::findSSSR(**iter); res << "$MOL\n"; res << MolToMolBlock(**iter, true, -1, false); } if (!separateAgents) { for (MOL_SPTR_VECT::const_iterator iter = rxn.beginAgentTemplates(); iter != rxn.endAgentTemplates(); ++iter) { // to write the mol block, we need ring information: MolOps::findSSSR(**iter); res << "$MOL\n"; res << MolToMolBlock(**iter, true, -1, false); } } for (MOL_SPTR_VECT::const_iterator iter = rxn.beginProductTemplates(); iter != rxn.endProductTemplates(); ++iter) { // to write the mol block, we need ring information: MolOps::findSSSR(**iter); res << "$MOL\n"; res << MolToMolBlock(**iter, true, -1, false); } if (separateAgents) { for (MOL_SPTR_VECT::const_iterator iter = rxn.beginAgentTemplates(); iter != rxn.endAgentTemplates(); ++iter) { // to write the mol block, we need ring information: MolOps::findSSSR(**iter); res << "$MOL\n"; res << MolToMolBlock(**iter, true, -1, false); } } return res.str(); }; #endif //! returns a ROMol with RXNMolRole used for a reaction ROMol *ChemicalReactionToRxnMol(const ChemicalReaction &rxn) { RWMol *res = new RWMol(); for (MOL_SPTR_VECT::const_iterator iter = rxn.beginReactantTemplates(); iter != rxn.endReactantTemplates(); ++iter) { setRXNRoleOfAllMoleculeAtoms(*iter->get(), 1); res->insertMol(*iter->get()); } for (MOL_SPTR_VECT::const_iterator iter = rxn.beginProductTemplates(); iter != rxn.endProductTemplates(); ++iter) { setRXNRoleOfAllMoleculeAtoms(*iter->get(), 2); res->insertMol(*iter->get()); } for (MOL_SPTR_VECT::const_iterator iter = rxn.beginAgentTemplates(); iter != rxn.endAgentTemplates(); ++iter) { setRXNRoleOfAllMoleculeAtoms(*iter->get(), 3); res->insertMol(*iter->get()); } return (ROMol *)res; } }
1
16,090
Just above, you have `const auto` for an `RDKit::MOL_SPTR_VECT::const_iterator`; here it is only `auto`. Why is this?
rdkit-rdkit
cpp
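One plausible answer to the question above (a sketch of the reasoning, not a quote from the PR thread): the loop advances begin itself, so begin cannot be const, while end never changes and could be:

auto begin = getStartIterator(rxn, type);    // mutated by ++begin below,
                                             // so const auto is impossible
const auto end = getEndIterator(rxn, type);  // never modified; const is fine
for (; begin != end; ++begin) {
  vfragsmi.push_back(molToString(**begin, toSmiles));
}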
@@ -186,9 +186,10 @@ bool Actions::registerEvent(Event_ptr event, const pugi::xml_node& node) return false; } -bool Actions::registerLuaEvent(Event* event) +bool Actions::registerLuaEvent(Action* event) { - Action_ptr action{ static_cast<Action*>(event) }; //event is guaranteed to be an Action + Event_ptr ptr = Event_ptr(event); + Action_ptr action{ static_cast<Action*>(ptr.release()) }; //event is guaranteed to be an Action if (action->getItemIdRange().size() > 0) { if (action->getItemIdRange().size() == 1) { auto result = useItemMap.emplace(action->getItemIdRange().at(0), std::move(*action));
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2018 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "actions.h" #include "bed.h" #include "configmanager.h" #include "container.h" #include "game.h" #include "pugicast.h" #include "spells.h" extern Game g_game; extern Spells* g_spells; extern Actions* g_actions; extern ConfigManager g_config; Actions::Actions() : scriptInterface("Action Interface") { scriptInterface.initState(); } Actions::~Actions() { clear(); } void Actions::clearMap(ActionUseMap& map) { map.clear(); } void Actions::clear() { clearMap(useItemMap); clearMap(uniqueItemMap); clearMap(actionItemMap); scriptInterface.reInitState(); } LuaScriptInterface& Actions::getScriptInterface() { return scriptInterface; } std::string Actions::getScriptBaseName() const { return "actions"; } Event_ptr Actions::getEvent(const std::string& nodeName) { if (strcasecmp(nodeName.c_str(), "action") != 0) { return nullptr; } return Event_ptr(new Action(&scriptInterface)); } bool Actions::registerEvent(Event_ptr event, const pugi::xml_node& node) { Action_ptr action{static_cast<Action*>(event.release())}; //event is guaranteed to be an Action pugi::xml_attribute attr; if ((attr = node.attribute("itemid"))) { uint16_t id = pugi::cast<uint16_t>(attr.value()); auto result = useItemMap.emplace(id, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with id: " << id << std::endl; } return result.second; } else if ((attr = node.attribute("fromid"))) { pugi::xml_attribute toIdAttribute = node.attribute("toid"); if (!toIdAttribute) { std::cout << "[Warning - Actions::registerEvent] Missing toid in fromid: " << attr.as_string() << std::endl; return false; } uint16_t fromId = pugi::cast<uint16_t>(attr.value()); uint16_t iterId = fromId; uint16_t toId = pugi::cast<uint16_t>(toIdAttribute.value()); auto result = useItemMap.emplace(iterId, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with id: " << iterId << " in fromid: " << fromId << ", toid: " << toId << std::endl; } bool success = result.second; while (++iterId <= toId) { result = useItemMap.emplace(iterId, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with id: " << iterId << " in fromid: " << fromId << ", toid: " << toId << std::endl; continue; } success = true; } return success; } else if ((attr = node.attribute("uniqueid"))) { uint16_t uid = pugi::cast<uint16_t>(attr.value()); auto result = uniqueItemMap.emplace(uid, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with uniqueid: " << uid << std::endl; } return result.second; } else if ((attr = 
node.attribute("fromuid"))) { pugi::xml_attribute toUidAttribute = node.attribute("touid"); if (!toUidAttribute) { std::cout << "[Warning - Actions::registerEvent] Missing touid in fromuid: " << attr.as_string() << std::endl; return false; } uint16_t fromUid = pugi::cast<uint16_t>(attr.value()); uint16_t iterUid = fromUid; uint16_t toUid = pugi::cast<uint16_t>(toUidAttribute.value()); auto result = uniqueItemMap.emplace(iterUid, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with unique id: " << iterUid << " in fromuid: " << fromUid << ", touid: " << toUid << std::endl; } bool success = result.second; while (++iterUid <= toUid) { result = uniqueItemMap.emplace(iterUid, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with unique id: " << iterUid << " in fromuid: " << fromUid << ", touid: " << toUid << std::endl; continue; } success = true; } return success; } else if ((attr = node.attribute("actionid"))) { uint16_t aid = pugi::cast<uint16_t>(attr.value()); auto result = actionItemMap.emplace(aid, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with actionid: " << aid << std::endl; } return result.second; } else if ((attr = node.attribute("fromaid"))) { pugi::xml_attribute toAidAttribute = node.attribute("toaid"); if (!toAidAttribute) { std::cout << "[Warning - Actions::registerEvent] Missing toaid in fromaid: " << attr.as_string() << std::endl; return false; } uint16_t fromAid = pugi::cast<uint16_t>(attr.value()); uint16_t iterAid = fromAid; uint16_t toAid = pugi::cast<uint16_t>(toAidAttribute.value()); auto result = actionItemMap.emplace(iterAid, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with action id: " << iterAid << " in fromaid: " << fromAid << ", toaid: " << toAid << std::endl; } bool success = result.second; while (++iterAid <= toAid) { result = actionItemMap.emplace(iterAid, *action); if (!result.second) { std::cout << "[Warning - Actions::registerEvent] Duplicate registered item with action id: " << iterAid << " in fromaid: " << fromAid << ", toaid: " << toAid << std::endl; continue; } success = true; } return success; } return false; } bool Actions::registerLuaEvent(Event* event) { Action_ptr action{ static_cast<Action*>(event) }; //event is guaranteed to be an Action if (action->getItemIdRange().size() > 0) { if (action->getItemIdRange().size() == 1) { auto result = useItemMap.emplace(action->getItemIdRange().at(0), std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with id: " << action->getItemIdRange().at(0) << std::endl; } return result.second; } else { auto v = action->getItemIdRange(); for (auto i = v.begin(); i != v.end(); i++) { auto result = useItemMap.emplace(*i, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with id: " << *i << " in range from id: " << v.at(0) << ", to id: " << v.at(v.size() - 1) << std::endl; continue; } } return true; } } else if (action->getUniqueIdRange().size() > 0) { if (action->getUniqueIdRange().size() == 1) { auto result = uniqueItemMap.emplace(action->getUniqueIdRange().at(0), std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with uid: " << action->getUniqueIdRange().at(0) << 
std::endl; } return result.second; } else { auto v = action->getUniqueIdRange(); for (auto i = v.begin(); i != v.end(); i++) { auto result = uniqueItemMap.emplace(*i, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with uid: " << *i << " in range from uid: " << v.at(0) << ", to uid: " << v.at(v.size() - 1) << std::endl; continue; } } return true; } } else if (action->getActionIdRange().size() > 0) { if (action->getActionIdRange().size() == 1) { auto result = actionItemMap.emplace(action->getActionIdRange().at(0), std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with aid: " << action->getActionIdRange().at(0) << std::endl; } return result.second; } else { auto v = action->getActionIdRange(); for (auto i = v.begin(); i != v.end(); i++) { auto result = actionItemMap.emplace(*i, std::move(*action)); if (!result.second) { std::cout << "[Warning - Actions::registerLuaEvent] Duplicate registered item with aid: " << *i << " in range from aid: " << v.at(0) << ", to aid: " << v.at(v.size() - 1) << std::endl; continue; } } return true; } } else { std::cout << "[Warning - Actions::registerLuaEvent] There is no id / aid / uid set for this event" << std::endl; return false; } } ReturnValue Actions::canUse(const Player* player, const Position& pos) { if (pos.x != 0xFFFF) { const Position& playerPos = player->getPosition(); if (playerPos.z != pos.z) { return playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS; } if (!Position::areInRange<1, 1>(playerPos, pos)) { return RETURNVALUE_TOOFARAWAY; } } return RETURNVALUE_NOERROR; } ReturnValue Actions::canUse(const Player* player, const Position& pos, const Item* item) { Action* action = getAction(item); if (action) { return action->canExecuteAction(player, pos); } return RETURNVALUE_NOERROR; } ReturnValue Actions::canUseFar(const Creature* creature, const Position& toPos, bool checkLineOfSight, bool checkFloor) { if (toPos.x == 0xFFFF) { return RETURNVALUE_NOERROR; } const Position& creaturePos = creature->getPosition(); if (checkFloor && creaturePos.z != toPos.z) { return creaturePos.z > toPos.z ? 
RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS; } if (!Position::areInRange<7, 5>(toPos, creaturePos)) { return RETURNVALUE_TOOFARAWAY; } if (checkLineOfSight && !g_game.canThrowObjectTo(creaturePos, toPos)) { return RETURNVALUE_CANNOTTHROW; } return RETURNVALUE_NOERROR; } Action* Actions::getAction(const Item* item) { if (item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { auto it = uniqueItemMap.find(item->getUniqueId()); if (it != uniqueItemMap.end()) { return &it->second; } } if (item->hasAttribute(ITEM_ATTRIBUTE_ACTIONID)) { auto it = actionItemMap.find(item->getActionId()); if (it != actionItemMap.end()) { return &it->second; } } auto it = useItemMap.find(item->getID()); if (it != useItemMap.end()) { return &it->second; } //rune items return g_spells->getRuneSpell(item->getID()); } ReturnValue Actions::internalUseItem(Player* player, const Position& pos, uint8_t index, Item* item, bool isHotkey) { if (Door* door = item->getDoor()) { if (!door->canUse(player)) { return RETURNVALUE_CANNOTUSETHISOBJECT; } } Action* action = getAction(item); if (action) { if (action->isScripted()) { if (action->executeUse(player, item, pos, nullptr, pos, isHotkey)) { return RETURNVALUE_NOERROR; } if (item->isRemoved()) { return RETURNVALUE_CANNOTUSETHISOBJECT; } } else if (action->function) { if (action->function(player, item, pos, nullptr, pos, isHotkey)) { return RETURNVALUE_NOERROR; } } } if (BedItem* bed = item->getBed()) { if (!bed->canUse(player)) { return RETURNVALUE_CANNOTUSETHISOBJECT; } if (bed->trySleep(player)) { player->setBedItem(bed); g_game.sendOfflineTrainingDialog(player); } return RETURNVALUE_NOERROR; } if (Container* container = item->getContainer()) { Container* openContainer; //depot container if (DepotLocker* depot = container->getDepotLocker()) { DepotLocker* myDepotLocker = player->getDepotLocker(depot->getDepotId()); myDepotLocker->setParent(depot->getParent()->getTile()); openContainer = myDepotLocker; player->setLastDepotId(depot->getDepotId()); } else { openContainer = container; } uint32_t corpseOwner = container->getCorpseOwner(); if (corpseOwner != 0 && !player->canOpenCorpse(corpseOwner)) { return RETURNVALUE_YOUARENOTTHEOWNER; } //open/close container int32_t oldContainerId = player->getContainerID(openContainer); if (oldContainerId != -1) { player->onCloseContainer(openContainer); player->closeContainer(oldContainerId); } else { player->addContainer(index, openContainer); player->onSendContainer(openContainer); } return RETURNVALUE_NOERROR; } const ItemType& it = Item::items[item->getID()]; if (it.canReadText) { if (it.canWriteText) { player->setWriteItem(item, it.maxTextLen); player->sendTextWindow(item, it.maxTextLen, true); } else { player->setWriteItem(nullptr); player->sendTextWindow(item, 0, false); } return RETURNVALUE_NOERROR; } return RETURNVALUE_CANNOTUSETHISOBJECT; } bool Actions::useItem(Player* player, const Position& pos, uint8_t index, Item* item, bool isHotkey) { player->setNextAction(OTSYS_TIME() + g_config.getNumber(ConfigManager::ACTIONS_DELAY_INTERVAL)); player->stopWalk(); if (isHotkey) { showUseHotkeyMessage(player, item, player->getItemTypeCount(item->getID(), -1)); } ReturnValue ret = internalUseItem(player, pos, index, item, isHotkey); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return false; } return true; } bool Actions::useItemEx(Player* player, const Position& fromPos, const Position& toPos, uint8_t toStackPos, Item* item, bool isHotkey, Creature* creature/* = nullptr*/) { player->setNextAction(OTSYS_TIME() + 
g_config.getNumber(ConfigManager::EX_ACTIONS_DELAY_INTERVAL)); player->stopWalk(); Action* action = getAction(item); if (!action) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return false; } ReturnValue ret = action->canExecuteAction(player, toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return false; } if (isHotkey) { showUseHotkeyMessage(player, item, player->getItemTypeCount(item->getID(), -1)); } if (!action->executeUse(player, item, fromPos, action->getTarget(player, creature, toPos, toStackPos), toPos, isHotkey)) { if (!action->hasOwnErrorHandler()) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); } return false; } return true; } void Actions::showUseHotkeyMessage(Player* player, const Item* item, uint32_t count) { std::ostringstream ss; const ItemType& it = Item::items[item->getID()]; if (!it.showCount) { ss << "Using one of " << item->getName() << "..."; } else if (count == 1) { ss << "Using the last " << item->getName() << "..."; } else { ss << "Using one of " << count << ' ' << item->getPluralName() << "..."; } player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str()); } Action::Action(LuaScriptInterface* interface) : Event(interface), function(nullptr), allowFarUse(false), checkFloor(true), checkLineOfSight(true) {} bool Action::configureEvent(const pugi::xml_node& node) { pugi::xml_attribute allowFarUseAttr = node.attribute("allowfaruse"); if (allowFarUseAttr) { allowFarUse = allowFarUseAttr.as_bool(); } pugi::xml_attribute blockWallsAttr = node.attribute("blockwalls"); if (blockWallsAttr) { checkLineOfSight = blockWallsAttr.as_bool(); } pugi::xml_attribute checkFloorAttr = node.attribute("checkfloor"); if (checkFloorAttr) { checkFloor = checkFloorAttr.as_bool(); } return true; } namespace { bool enterMarket(Player* player, Item*, const Position&, Thing*, const Position&, bool) { if (player->getLastDepotId() == -1) { return false; } player->sendMarketEnter(player->getLastDepotId()); return true; } } bool Action::loadFunction(const pugi::xml_attribute& attr, bool isScripted) { const char* functionName = attr.as_string(); if (strcasecmp(functionName, "market") == 0) { function = enterMarket; } else { if (!isScripted) { std::cout << "[Warning - Action::loadFunction] Function \"" << functionName << "\" does not exist." 
<< std::endl; return false; } } if (!isScripted) { scripted = false; } return true; } std::string Action::getScriptEventName() const { return "onUse"; } ReturnValue Action::canExecuteAction(const Player* player, const Position& toPos) { if (!allowFarUse) { return g_actions->canUse(player, toPos); } else { return g_actions->canUseFar(player, toPos, checkLineOfSight, checkFloor); } } Thing* Action::getTarget(Player* player, Creature* targetCreature, const Position& toPosition, uint8_t toStackPos) const { if (targetCreature) { return targetCreature; } return g_game.internalGetThing(player, toPosition, toStackPos, 0, STACKPOS_USETARGET); } bool Action::executeUse(Player* player, Item* item, const Position& fromPosition, Thing* target, const Position& toPosition, bool isHotkey) { //onUse(player, item, fromPosition, target, toPosition, isHotkey) if (!scriptInterface->reserveScriptEnv()) { std::cout << "[Error - Action::executeUse] Call stack overflow" << std::endl; return false; } ScriptEnvironment* env = scriptInterface->getScriptEnv(); env->setScriptId(scriptId, scriptInterface); lua_State* L = scriptInterface->getLuaState(); scriptInterface->pushFunction(scriptId); LuaScriptInterface::pushUserdata<Player>(L, player); LuaScriptInterface::setMetatable(L, -1, "Player"); LuaScriptInterface::pushThing(L, item); LuaScriptInterface::pushPosition(L, fromPosition); LuaScriptInterface::pushThing(L, target); LuaScriptInterface::pushPosition(L, toPosition); LuaScriptInterface::pushBoolean(L, isHotkey); return scriptInterface->callFunction(6); }
1
15,682
@djarek just a quick question: wouldn't `ptr.reset()` be better here, since it does not leak memory when we never call delete on the raw pointer as well? It may make sense to use `ptr.release()` if the event is guaranteed to be loaded again, but on a reload wouldn't it leak memory if, for example, I removed that script from my files before reloading?
otland-forgottenserver
cpp
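A self-contained sketch of the release() vs reset() semantics the comment asks about, assuming Event_ptr/Action_ptr are std::unique_ptr aliases as the patch implies. The point: release() only relinquishes ownership, and here the raw pointer is adopted by another smart pointer on the same line, so there is no window in which the object is unowned; reset() would instead destroy the event immediately, leaving nothing to cast.

#include <memory>

struct Event { virtual ~Event() = default; };
struct Action : Event {};

int main() {
  std::unique_ptr<Event> ptr(new Action());
  // Ownership hops from ptr to action with no gap, so nothing leaks,
  // even if registration later fails and action just goes out of scope:
  std::unique_ptr<Action> action(static_cast<Action*>(ptr.release()));
  // ptr.reset() here would have deleted the Event up front; the two calls
  // are not interchangeable in this hand-off pattern.
}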
@@ -218,8 +218,12 @@ func (cfg *Config) GetString(key string) string { // GetStringSlice returns config value as []string. func (cfg *Config) GetStringSlice(key string) []string { - value := cfg.Get(key).(*cli.StringSlice) - return cast.ToStringSlice([]string(*value)) + switch cfg.Get(key).(type) { + case *cli.StringSlice: + return cast.ToStringSlice([]string(*cfg.Get(key).(*cli.StringSlice))) + default: + return cast.ToStringSlice(cfg.Get(key)) + } } // ParseBoolFlag parses a cli.BoolFlag from command's context and
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package config import ( "io/ioutil" "strings" "time" "github.com/BurntSushi/toml" "github.com/mysteriumnetwork/node/eventbus" "github.com/mysteriumnetwork/node/utils/jsonutil" "github.com/pkg/errors" "github.com/rs/zerolog/log" "github.com/spf13/cast" "gopkg.in/urfave/cli.v1" ) // Config stores application configuration in 3 separate maps (listed from the lowest priority to the highest): // // • Default values // // • User configuration (config.toml) // // • CLI flags type Config struct { userConfigLocation string defaults map[string]interface{} user map[string]interface{} cli map[string]interface{} eventBus eventbus.EventBus } // Current global configuration instance. var Current *Config func init() { Current = NewConfig() } // NewConfig creates a new configuration instance. func NewConfig() *Config { return &Config{ userConfigLocation: "", defaults: make(map[string]interface{}), user: make(map[string]interface{}), cli: make(map[string]interface{}), } } func (cfg *Config) userConfigLoaded() bool { return cfg.userConfigLocation != "" } // EnableEventPublishing enables config event publishing to the event bus. func (cfg *Config) EnableEventPublishing(eb eventbus.EventBus) { cfg.eventBus = eb } // LoadUserConfig loads and remembers user config location. func (cfg *Config) LoadUserConfig(location string) error { log.Debug().Msg("Loading user configuration: " + location) cfg.userConfigLocation = location _, err := toml.DecodeFile(cfg.userConfigLocation, &cfg.user) if err != nil { return errors.Wrap(err, "failed to decode configuration file") } cfgJson, err := jsonutil.ToJson(cfg.user) if err != nil { return err } log.Info().Msg("User configuration loaded: \n" + cfgJson) return nil } // SaveUserConfig saves user configuration to the file from which it was loaded. func (cfg *Config) SaveUserConfig() error { log.Info().Msg("Saving user configuration") if !cfg.userConfigLoaded() { return errors.New("user configuration cannot be saved, because it must be loaded first") } var out strings.Builder err := toml.NewEncoder(&out).Encode(cfg.user) if err != nil { return errors.Wrap(err, "failed to write configuration as toml") } err = ioutil.WriteFile(cfg.userConfigLocation, []byte(out.String()), 0700) if err != nil { return errors.Wrap(err, "failed to write configuration to file") } cfgJson, err := jsonutil.ToJson(cfg.user) if err != nil { return err } log.Info().Msg("User configuration written: \n" + cfgJson) return nil } // GetUserConfig returns user configuration. func (cfg *Config) GetUserConfig() map[string]interface{} { return cfg.user } // SetDefault sets default value for key. func (cfg *Config) SetDefault(key string, value interface{}) { cfg.set(&cfg.defaults, key, value) } // SetUser sets user configuration value for key. 
func (cfg *Config) SetUser(key string, value interface{}) { if cfg.eventBus != nil { cfg.eventBus.Publish(Topic(key), value) } cfg.set(&cfg.user, key, value) } // SetCLI sets value passed via CLI flag for key. func (cfg *Config) SetCLI(key string, value interface{}) { cfg.set(&cfg.cli, key, value) } // RemoveUser removes user configuration value for key. func (cfg *Config) RemoveUser(key string) { cfg.remove(&cfg.user, key) } // RemoveCLI removes configured CLI flag value by key. func (cfg *Config) RemoveCLI(key string) { cfg.remove(&cfg.cli, key) } // set sets value to a particular configuration value map. func (cfg *Config) set(configMap *map[string]interface{}, key string, value interface{}) { key = strings.ToLower(key) segments := strings.Split(key, ".") lastKey := strings.ToLower(segments[len(segments)-1]) deepestMap := deepSearch(*configMap, segments[0:len(segments)-1]) // set innermost value deepestMap[lastKey] = value } // remove removes a configured value from a particular configuration map. func (cfg *Config) remove(configMap *map[string]interface{}, key string) { key = strings.ToLower(key) segments := strings.Split(key, ".") lastKey := strings.ToLower(segments[len(segments)-1]) deepestMap := deepSearch(*configMap, segments[0:len(segments)-1]) // set innermost value delete(deepestMap, lastKey) } // Get returns stored config value as-is. func (cfg *Config) Get(key string) interface{} { segments := strings.Split(strings.ToLower(key), ".") cliValue := cfg.searchMap(cfg.cli, segments) if cliValue != nil { log.Debug().Msgf("Returning CLI value %v:%v", key, cliValue) return cliValue } userValue := cfg.searchMap(cfg.user, segments) if userValue != nil { log.Debug().Msgf("Returning user config value %v:%v", key, userValue) return userValue } defaultValue := cfg.searchMap(cfg.defaults, segments) log.Debug().Msgf("Returning default value %v:%v", key, defaultValue) return defaultValue } // GetBool returns config value as bool. func (cfg *Config) GetBool(key string) bool { return cast.ToBool(cfg.Get(key)) } // GetInt returns config value as int. func (cfg *Config) GetInt(key string) int { return cast.ToInt(cfg.Get(key)) } // GetUInt64 returns config value as uint64. func (cfg *Config) GetUInt64(key string) uint64 { return cast.ToUint64(cfg.Get(key)) } // GetFloat64 returns config value as float64. func (cfg *Config) GetFloat64(key string) float64 { return cast.ToFloat64(cfg.Get(key)) } // GetDuration returns config value as duration. func (cfg *Config) GetDuration(key string) time.Duration { return cast.ToDuration(cfg.Get(key)) } // GetString returns config value as string. func (cfg *Config) GetString(key string) string { return cast.ToString(cfg.Get(key)) } // GetStringSlice returns config value as []string. func (cfg *Config) GetStringSlice(key string) []string { value := cfg.Get(key).(*cli.StringSlice) return cast.ToStringSlice([]string(*value)) } // ParseBoolFlag parses a cli.BoolFlag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseBoolFlag(ctx *cli.Context, flag cli.BoolFlag) { cfg.SetDefault(flag.Name, false) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Bool(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalBool(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseBoolTFlag parses a cli.BoolTFlag from command's context and // sets default and CLI values to the application configuration. 
func (cfg *Config) ParseBoolTFlag(ctx *cli.Context, flag cli.BoolTFlag) { cfg.SetDefault(flag.Name, true) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Bool(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalBool(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseIntFlag parses a cli.IntFlag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseIntFlag(ctx *cli.Context, flag cli.IntFlag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Int(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalInt(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseUInt64Flag parses a cli.Uint64Flag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseUInt64Flag(ctx *cli.Context, flag cli.Uint64Flag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Uint64(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalUint64(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseFloat64Flag parses a cli.Float64Flag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseFloat64Flag(ctx *cli.Context, flag cli.Float64Flag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Float64(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalFloat64(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseDurationFlag parses a cli.DurationFlag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseDurationFlag(ctx *cli.Context, flag cli.DurationFlag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.Duration(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalDuration(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseStringFlag parses a cli.StringFlag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseStringFlag(ctx *cli.Context, flag cli.StringFlag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.String(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalString(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // ParseStringSliceFlag parses a cli.StringSliceFlag from command's context and // sets default and CLI values to the application configuration. func (cfg *Config) ParseStringSliceFlag(ctx *cli.Context, flag cli.StringSliceFlag) { cfg.SetDefault(flag.Name, flag.Value) if ctx.IsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.StringSlice(flag.Name)) } else if ctx.GlobalIsSet(flag.Name) { cfg.SetCLI(flag.Name, ctx.GlobalStringSlice(flag.Name)) } else { cfg.RemoveCLI(flag.Name) } } // GetBool shorthand for getting current configuration value for cli.BoolFlag. func GetBool(flag cli.BoolFlag) bool { return Current.GetBool(flag.Name) } // GetTBool shorthand for getting current configuration value for cli.BoolTFlag. func GetTBool(flag cli.BoolTFlag) bool { return Current.GetBool(flag.Name) } // GetInt shorthand for getting current configuration value for cli.IntFlag. 
func GetInt(flag cli.IntFlag) int { return Current.GetInt(flag.Name) } // GetString shorthand for getting current configuration value for cli.StringFlag. func GetString(flag cli.StringFlag) string { return Current.GetString(flag.Name) } // GetStringSlice shorthand for getting current configuration value for cli.StringSliceFlag. func GetStringSlice(flag cli.StringSliceFlag) []string { return Current.GetStringSlice(flag.Name) } // GetDuration shorthand for getting current configuration value for cli.DurationFlag. func GetDuration(flag cli.DurationFlag) time.Duration { return Current.GetDuration(flag.Name) } // GetUInt64 shorthand for getting current configuration value for cli.Uint64Flag. func GetUInt64(flag cli.Uint64Flag) uint64 { return Current.GetUInt64(flag.Name) } // GetFloat64 shorthand for getting current configuration value for cli.Uint64Flag. func GetFloat64(flag cli.Float64Flag) float64 { return Current.GetFloat64(flag.Name) } // Topic returns event bus topic for the given config key to listen for its updates. func Topic(configKey string) string { return "config:" + configKey }
1
15,381
Why call `Get()` twice when you already have the value two lines above?
mysteriumnetwork-node
go
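The suggestion above amounts to binding the value in the type switch so Get() runs once. A drop-in sketch for the same file (where cast and cli are already imported); behavior is unchanged, the lookup just isn't repeated:

func (cfg *Config) GetStringSlice(key string) []string {
	switch v := cfg.Get(key).(type) {
	case *cli.StringSlice:
		return cast.ToStringSlice([]string(*v))
	default:
		return cast.ToStringSlice(v)
	}
}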
@@ -310,10 +310,17 @@ class ServiceProvider extends ModuleServiceProvider $this->registerConsoleCommand('october.update', 'System\Console\OctoberUpdate'); $this->registerConsoleCommand('october.util', 'System\Console\OctoberUtil'); $this->registerConsoleCommand('october.mirror', 'System\Console\OctoberMirror'); + $this->registerConsoleCommand('october.mirror', 'System\Console\OctoberFresh'); + $this->registerConsoleCommand('plugin.install', 'System\Console\PluginInstall'); $this->registerConsoleCommand('plugin.remove', 'System\Console\PluginRemove'); $this->registerConsoleCommand('plugin.refresh', 'System\Console\PluginRefresh'); + $this->registerConsoleCommand('theme.use', 'System\Console\ThemeUse'); + $this->registerConsoleCommand('theme.list', 'System\Console\ThemeList'); + $this->registerConsoleCommand('theme.install', 'System\Console\ThemeInstall'); + $this->registerConsoleCommand('theme.delete', 'System\Console\ThemeDelete'); + /* * Register the sidebar for the System main menu */
1
<?php namespace System; use App; use Lang; use Event; use Config; use Backend; use Request; use DbDongle; use BackendMenu; use BackendAuth; use Twig_Environment; use Twig_Loader_String; use System\Classes\ErrorHandler; use System\Classes\MarkupManager; use System\Classes\PluginManager; use System\Classes\SettingsManager; use System\Twig\Engine as TwigEngine; use System\Twig\Loader as TwigLoader; use System\Twig\Extension as TwigExtension; use System\Models\EventLog; use System\Models\MailSettings; use System\Models\MailTemplate; use Backend\Classes\WidgetManager; use October\Rain\Support\ModuleServiceProvider; use October\Rain\Router\Helper as RouterHelper; class ServiceProvider extends ModuleServiceProvider { /** * Register the service provider. * * @return void */ public function register() { /* * Register self */ parent::register('system'); /* * Register singletons */ App::singleton('backend.helper', function () { return new \Backend\Helpers\Backend; }); App::singleton('backend.menu', function () { return \Backend\Classes\NavigationManager::instance(); }); App::singleton('backend.auth', function () { return \Backend\Classes\AuthManager::instance(); }); $this->registerPrivilegedActions(); /* * Register all plugins */ $pluginManager = PluginManager::instance(); $pluginManager->registerAll(); /* * Allow plugins to use the scheduler */ Event::listen('console.schedule', function($schedule) use ($pluginManager) { foreach ($pluginManager->getPlugins() as $plugin) { if (method_exists($plugin, 'registerSchedule')) { $plugin->registerSchedule($schedule); } } }); /* * Error handling for uncaught Exceptions */ Event::listen('exception.beforeRender', function ($exception, $httpCode, $request){ $handler = new ErrorHandler; return $handler->handleException($exception); }); /* * Write all log events to the database */ Event::listen('illuminate.log', function ($level, $message, $context) { if (DbDongle::hasDatabase() && !defined('OCTOBER_NO_EVENT_LOGGING')) { EventLog::add($message, $level); } }); /* * Register basic Twig */ App::singleton('twig', function ($app) { $twig = new Twig_Environment(new TwigLoader(), ['auto_reload' => true]); $twig->addExtension(new TwigExtension); return $twig; }); /* * Register .htm extension for Twig views */ App::make('view')->addExtension('htm', 'twig', function () { return new TwigEngine(App::make('twig')); }); /* * Register Twig that will parse strings */ App::singleton('twig.string', function ($app) { $twig = $app['twig']; $twig->setLoader(new Twig_Loader_String); return $twig; }); /* * Override system mailer with mail settings */ Event::listen('mailer.beforeRegister', function () { if (MailSettings::isConfigured()) { MailSettings::applyConfigValues(); } }); /* * Override standard Mailer content with template */ Event::listen('mailer.beforeAddContent', function ($mailer, $message, $view, $data) { if (MailTemplate::addContentToMailer($message, $view, $data)) { return false; } }); /* * Register other module providers */ foreach (Config::get('cms.loadModules', []) as $module) { if (strtolower(trim($module)) == 'system') { continue; } App::register('\\' . $module . 
'\ServiceProvider'); } /* * Register navigation */ BackendMenu::registerCallback(function ($manager) { $manager->registerMenuItems('October.System', [ 'system' => [ 'label' => 'system::lang.settings.menu_label', 'icon' => 'icon-cog', 'url' => Backend::url('system/settings'), 'permissions' => [], 'order' => 1000 ] ]); }); /* * Register report widgets */ WidgetManager::instance()->registerReportWidgets(function ($manager) { $manager->registerReportWidget('System\ReportWidgets\Status', [ 'label' => 'backend::lang.dashboard.status.widget_title_default', 'context' => 'dashboard' ]); }); /* * Register permissions */ BackendAuth::registerCallback(function ($manager) { $manager->registerPermissions('October.System', [ 'system.manage_updates' => [ 'label' => 'system::lang.permissions.manage_software_updates', 'tab' => 'system::lang.permissions.name' ], 'system.access_logs' => [ 'label' => 'system::lang.permissions.access_logs', 'tab' => 'system::lang.permissions.name' ], 'system.manage_mail_settings' => [ 'label' => 'system::lang.permissions.manage_mail_settings', 'tab' => 'system::lang.permissions.name' ], 'system.manage_mail_templates' => [ 'label' => 'system::lang.permissions.manage_mail_templates', 'tab' => 'system::lang.permissions.name' ] ]); }); /* * Register markup tags */ MarkupManager::instance()->registerCallback(function ($manager) { $manager->registerFunctions([ // Functions 'input' => 'input', 'post' => 'post', 'get' => 'get', 'link_to' => 'link_to', 'link_to_asset' => 'link_to_asset', 'link_to_route' => 'link_to_route', 'link_to_action' => 'link_to_action', 'asset' => 'asset', 'action' => 'action', 'url' => 'url', 'route' => 'route', 'secure_url' => 'secure_url', 'secure_asset' => 'secure_asset', // Classes 'str_*' => ['Str', '*'], 'url_*' => ['URL', '*'], 'html_*' => ['HTML', '*'], 'form_*' => ['Form', '*'], 'form_macro' => ['Form', '__call'] ]); $manager->registerFilters([ // Classes 'slug' => ['Str', 'slug'], 'plural' => ['Str', 'plural'], 'singular' => ['Str', 'singular'], 'finish' => ['Str', 'finish'], 'snake' => ['Str', 'snake'], 'camel' => ['Str', 'camel'], 'studly' => ['Str', 'studly'], 'trans' => ['Lang', 'get'], 'transchoice' => ['Lang', 'choice'], 'md' => ['Markdown', 'parse'], ]); }); /* * Register settings */ SettingsManager::instance()->registerCallback(function ($manager) { $manager->registerSettingItems('October.System', [ 'updates' => [ 'label' => 'system::lang.updates.menu_label', 'description' => 'system::lang.updates.menu_description', 'category' => SettingsManager::CATEGORY_SYSTEM, 'icon' => 'icon-cloud-download', 'url' => Backend::url('system/updates'), 'permissions' => ['system.manage_updates'], 'order' => 300 ], 'administrators' => [ 'label' => 'backend::lang.user.menu_label', 'description' => 'backend::lang.user.menu_description', 'category' => SettingsManager::CATEGORY_SYSTEM, 'icon' => 'icon-users', 'url' => Backend::url('backend/users'), 'permissions' => ['backend.manage_users'], 'order' => 400 ], 'mail_settings' => [ 'label' => 'system::lang.mail.menu_label', 'description' => 'system::lang.mail.menu_description', 'category' => SettingsManager::CATEGORY_MAIL, 'icon' => 'icon-envelope', 'class' => 'System\Models\MailSettings', 'permissions' => ['system.manage_mail_settings'], 'order' => 600 ], 'mail_templates' => [ 'label' => 'system::lang.mail_templates.menu_label', 'description' => 'system::lang.mail_templates.menu_description', 'category' => SettingsManager::CATEGORY_MAIL, 'icon' => 'icon-envelope-square', 'url' => Backend::url('system/mailtemplates'), 
'permissions' => ['system.manage_mail_templates'], 'order' => 610 ], 'event_logs' => [ 'label' => 'system::lang.event_log.menu_label', 'description' => 'system::lang.event_log.menu_description', 'category' => SettingsManager::CATEGORY_LOGS, 'icon' => 'icon-exclamation-triangle', 'url' => Backend::url('system/eventlogs'), 'permissions' => ['system.access_logs'], 'order' => 900 ], 'request_logs' => [ 'label' => 'system::lang.request_log.menu_label', 'description' => 'system::lang.request_log.menu_description', 'category' => SettingsManager::CATEGORY_LOGS, 'icon' => 'icon-file-o', 'url' => Backend::url('system/requestlogs'), 'permissions' => ['system.access_logs'], 'order' => 910 ] ]); }); /* * Add CMS based cache clearing to native command */ Event::listen('cache:cleared', function() { \System\Helpers\Cache::clear(); }); /* * Register console commands */ $this->registerConsoleCommand('october.up', 'System\Console\OctoberUp'); $this->registerConsoleCommand('october.down', 'System\Console\OctoberDown'); $this->registerConsoleCommand('october.update', 'System\Console\OctoberUpdate'); $this->registerConsoleCommand('october.util', 'System\Console\OctoberUtil'); $this->registerConsoleCommand('october.mirror', 'System\Console\OctoberMirror'); $this->registerConsoleCommand('plugin.install', 'System\Console\PluginInstall'); $this->registerConsoleCommand('plugin.remove', 'System\Console\PluginRemove'); $this->registerConsoleCommand('plugin.refresh', 'System\Console\PluginRefresh'); /* * Register the sidebar for the System main menu */ BackendMenu::registerContextSidenavPartial( 'October.System', 'system', '~/modules/system/partials/_system_sidebar.htm' ); } /** * Bootstrap the module events. * * @return void */ public function boot() { /* * Boot plugins */ $pluginManager = PluginManager::instance(); $pluginManager->bootAll(); parent::boot('system'); } /** * Check for CLI or system/updates route and disable any plugin initialization */ protected function registerPrivilegedActions() { $requests = ['/combine', '@/system/updates', '@/system/install', '@/backend/auth']; $commands = ['october:up', 'october:update']; /* * Requests */ $path = RouterHelper::normalizeUrl(Request::path()); $backendUri = RouterHelper::normalizeUrl(Config::get('cms.backendUri')); foreach ($requests as $request) { if (substr($request, 0, 1) == '@') { $request = $backendUri . substr($request, 1); } if (stripos($path, $request) === 0) { PluginManager::$noInit = true; } } /* * CLI */ if (App::runningInConsole() && count(array_intersect($commands, Request::server('argv'))) > 0) { PluginManager::$noInit = true; } } }
1
10,798
There is a typo here; this needs to state `october.fresh`.
octobercms-october
php
@@ -139,6 +139,18 @@ public class TestHadoopTables { Assert.assertEquals("Transform must match", transform, sortOrder.fields().get(0).transform()); } + @Test + public void testTableName() { + PartitionSpec spec = PartitionSpec.builderFor(SCHEMA) + .bucket("data", 16) + .build(); + String location = tableDir.toURI().toString(); + TABLES.create(SCHEMA, spec, location); + + Table table = TABLES.load(location); + Assert.assertEquals("Table name must match", location, table.name()); + } + private static void createDummyTable(File tableDir, File dataDir) throws IOException { Table table = TABLES.create(SCHEMA, tableDir.toURI().toString()); AppendFiles append = table.newAppend();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.hadoop; import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import org.apache.iceberg.AppendFiles; import org.apache.iceberg.AssertHelpers; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataFiles; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; import org.apache.iceberg.Table; import org.apache.iceberg.exceptions.NoSuchTableException; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.transforms.Transform; import org.apache.iceberg.transforms.Transforms; import org.apache.iceberg.types.Types; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import static org.apache.iceberg.NullOrder.NULLS_FIRST; import static org.apache.iceberg.SortDirection.ASC; import static org.apache.iceberg.types.Types.NestedField.required; public class TestHadoopTables { private static final HadoopTables TABLES = new HadoopTables(); private static final Schema SCHEMA = new Schema( required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()) ); @Rule public TemporaryFolder temp = new TemporaryFolder(); private File tableDir = null; @Before public void setupTableLocation() throws Exception { tableDir = temp.newFolder(); } @Test public void testDropTable() { TABLES.create(SCHEMA, tableDir.toURI().toString()); TABLES.dropTable(tableDir.toURI().toString()); AssertHelpers.assertThrows( "Should complain about missing table", NoSuchTableException.class, "Table does not exist", () -> TABLES.load(tableDir.toURI().toString())); } @Test public void testDropTableWithPurge() throws IOException { File dataDir = temp.newFolder(); createDummyTable(tableDir, dataDir); TABLES.dropTable(tableDir.toURI().toString(), true); AssertHelpers.assertThrows( "Should complain about missing table", NoSuchTableException.class, "Table does not exist", () -> TABLES.load(tableDir.toURI().toString())); Assert.assertEquals(0, dataDir.listFiles().length); Assert.assertFalse(tableDir.exists()); Assert.assertFalse(TABLES.dropTable(tableDir.toURI().toString())); } @Test public void testDropTableWithoutPurge() throws IOException { File dataDir = temp.newFolder(); createDummyTable(tableDir, dataDir); TABLES.dropTable(tableDir.toURI().toString(), false); AssertHelpers.assertThrows( "Should complain about missing table", NoSuchTableException.class, "Table does not exist", () -> TABLES.load(tableDir.toURI().toString())); Assert.assertEquals(1, dataDir.listFiles().length); 
Assert.assertFalse(tableDir.exists()); Assert.assertFalse(TABLES.dropTable(tableDir.toURI().toString())); } @Test public void testDefaultSortOrder() { PartitionSpec spec = PartitionSpec.builderFor(SCHEMA) .bucket("data", 16) .build(); Table table = TABLES.create(SCHEMA, spec, tableDir.toURI().toString()); SortOrder sortOrder = table.sortOrder(); Assert.assertEquals("Order ID must match", 0, sortOrder.orderId()); Assert.assertTrue("Order must unsorted", sortOrder.isUnsorted()); } @Test public void testCustomSortOrder() { PartitionSpec spec = PartitionSpec.builderFor(SCHEMA) .bucket("data", 16) .build(); SortOrder order = SortOrder.builderFor(SCHEMA) .asc("id", NULLS_FIRST) .build(); Table table = TABLES.create(SCHEMA, spec, order, Maps.newHashMap(), tableDir.toURI().toString()); SortOrder sortOrder = table.sortOrder(); Assert.assertEquals("Order ID must match", 1, sortOrder.orderId()); Assert.assertEquals("Order must have 1 field", 1, sortOrder.fields().size()); Assert.assertEquals("Direction must match ", ASC, sortOrder.fields().get(0).direction()); Assert.assertEquals("Null order must match ", NULLS_FIRST, sortOrder.fields().get(0).nullOrder()); Transform<?, ?> transform = Transforms.identity(Types.IntegerType.get()); Assert.assertEquals("Transform must match", transform, sortOrder.fields().get(0).transform()); } private static void createDummyTable(File tableDir, File dataDir) throws IOException { Table table = TABLES.create(SCHEMA, tableDir.toURI().toString()); AppendFiles append = table.newAppend(); String data = dataDir.getPath() + "/data.parquet"; Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8); DataFile dataFile = DataFiles.builder(PartitionSpec.unpartitioned()) .withPath(data) .withFileSizeInBytes(10) .withRecordCount(1) .build(); append.appendFile(dataFile); append.commit(); // Make sure that the data file and the manifest dir is created Assert.assertEquals(1, dataDir.listFiles().length); Assert.assertEquals(1, tableDir.listFiles().length); } }
1
25,824
Metadata tables loaded through `HadoopTables` will have their names reported as `location.type`, which is odd given that we normally use `location#type`. I am not sure whether this is a big deal or not.
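For illustration, a minimal sketch of the behavior described above, reusing the `TABLES`, `SCHEMA`, and `tableDir` fixtures from the test class in this record; the `#files` load syntax and the exact `location + ".files"` name are assumptions drawn from the comment, not from the patch.

```java
@Test
public void testMetadataTableName() {
  String location = tableDir.toURI().toString();
  TABLES.create(SCHEMA, location);

  // Load the "files" metadata table through HadoopTables
  // (assumed "<location>#<type>" syntax).
  Table filesTable = TABLES.load(location + "#files");

  // Per the observation above, the reported name uses a dot separator,
  // i.e. "<location>.files" rather than the usual "<location>#files".
  Assert.assertEquals(location + ".files", filesTable.name());
}
```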
apache-iceberg
java
@@ -343,7 +343,7 @@ Blockly.VerticalFlyout.prototype.wheel_ = function(e) { delta *= 10; } var metrics = this.getMetrics_(); - var pos = metrics.viewTop + delta; + var pos = -this.workspace_.scrollY + delta; var limit = metrics.contentHeight - metrics.viewHeight; pos = Math.min(pos, limit); pos = Math.max(pos, 0);
1
/** * @license * Visual Blocks Editor * * Copyright 2011 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Layout code for a vertical variant of the flyout. * @author [email protected] (Rachel Fenichel) */ 'use strict'; goog.provide('Blockly.VerticalFlyout'); goog.require('Blockly.Block'); goog.require('Blockly.Comment'); goog.require('Blockly.Events'); goog.require('Blockly.Flyout'); goog.require('Blockly.FlyoutButton'); goog.require('Blockly.utils'); goog.require('Blockly.WorkspaceSvg'); goog.require('goog.dom'); goog.require('goog.events'); goog.require('goog.math.Rect'); goog.require('goog.userAgent'); /** * Class for a flyout. * @param {!Object} workspaceOptions Dictionary of options for the workspace. * @extends {Blockly.Flyout} * @constructor */ Blockly.VerticalFlyout = function(workspaceOptions) { workspaceOptions.getMetrics = this.getMetrics_.bind(this); workspaceOptions.setMetrics = this.setMetrics_.bind(this); Blockly.VerticalFlyout.superClass_.constructor.call(this, workspaceOptions); /** * Flyout should be laid out horizontally vs vertically. * @type {boolean} * @private */ this.horizontalLayout_ = false; /** * List of checkboxes next to variable blocks. * Each element is an object containing the SVG for the checkbox, a boolean * for its checked state, and the block the checkbox is associated with. * @type {!Array.<!Object>} * @private */ this.checkboxes_ = []; }; goog.inherits(Blockly.VerticalFlyout, Blockly.Flyout); /** * Does the flyout automatically close when a block is created? * @type {boolean} */ Blockly.VerticalFlyout.prototype.autoClose = false; /** * The width of the flyout, if not otherwise specified. * @type {number} */ Blockly.VerticalFlyout.prototype.DEFAULT_WIDTH = 250; /** * Size of a checkbox next to a variable reporter. * @type {number} * @const */ Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE = 20; /** * SVG path data for checkmark in checkbox. * @type {string} * @const */ Blockly.VerticalFlyout.prototype.CHECKMARK_PATH = 'M' + Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 4 + ' ' + Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 2 + 'L' + 5 * Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 12 + ' ' + 2 * Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 3 + 'L' + 3 * Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 4 + ' ' + Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE / 3; /** * Size of the checkbox corner radius * @type {number} * @const */ Blockly.VerticalFlyout.prototype.CHECKBOX_CORNER_RADIUS = 5; /** * Space above and around the checkbox. * @type {number} * @const */ Blockly.VerticalFlyout.prototype.CHECKBOX_MARGIN = Blockly.Flyout.prototype.MARGIN; /** * Total additional width of a row that contains a checkbox. * @type {number} * @const */ Blockly.VerticalFlyout.prototype.CHECKBOX_SPACE_X = Blockly.VerticalFlyout.prototype.CHECKBOX_SIZE + 2 * Blockly.VerticalFlyout.prototype.CHECKBOX_MARGIN; /** * Initializes the flyout. 
* @param {!Blockly.Workspace} targetWorkspace The workspace in which to create * new blocks. */ Blockly.VerticalFlyout.prototype.init = function(targetWorkspace) { Blockly.VerticalFlyout.superClass_.init.call(this, targetWorkspace); this.workspace_.scale = targetWorkspace.scale; }; /** * Creates the flyout's DOM. Only needs to be called once. * @param {string} tagName HTML element * @return {!Element} The flyout's SVG group. */ Blockly.VerticalFlyout.prototype.createDom = function(tagName) { Blockly.VerticalFlyout.superClass_.createDom.call(this, tagName); /* <defs> <clipPath id="blocklyBlockMenuClipPath"> <rect id="blocklyBlockMenuClipRect" height="1147px" width="248px" y="0" x="0"> </rect> </clipPath> </defs> */ this.defs_ = Blockly.utils.createSvgElement('defs', {}, this.svgGroup_); var clipPath = Blockly.utils.createSvgElement('clipPath', {'id':'blocklyBlockMenuClipPath'}, this.defs_); this.clipRect_ = Blockly.utils.createSvgElement('rect', {'id': 'blocklyBlockMenuClipRect', 'height': '0', 'width': '0', 'y': '0', 'x': '0' }, clipPath); this.workspace_.svgGroup_.setAttribute('clip-path', 'url(#blocklyBlockMenuClipPath)'); return this.svgGroup_; }; /** * Return an object with all the metrics required to size scrollbars for the * flyout. The following properties are computed: * .viewHeight: Height of the visible rectangle, * .viewWidth: Width of the visible rectangle, * .contentHeight: Height of the contents, * .contentWidth: Width of the contents, * .viewTop: Offset of top edge of visible rectangle from parent, * .contentTop: Offset of the top-most content from the y=0 coordinate, * .absoluteTop: Top-edge of view. * .viewLeft: Offset of the left edge of visible rectangle from parent, * .contentLeft: Offset of the left-most content from the x=0 coordinate, * .absoluteLeft: Left-edge of view. * @return {Object} Contains size and position metrics of the flyout. * @private */ Blockly.VerticalFlyout.prototype.getMetrics_ = function() { if (!this.isVisible()) { // Flyout is hidden. return null; } try { var optionBox = this.workspace_.getCanvas().getBBox(); } catch (e) { // Firefox has trouble with hidden elements (Bug 528969). var optionBox = {height: 0, y: 0, width: 0, x: 0}; } // Padding for the end of the scrollbar. var absoluteTop = this.SCROLLBAR_PADDING; var absoluteLeft = 0; var viewHeight = this.height_ - 2 * this.SCROLLBAR_PADDING; var viewWidth = this.getWidth() - this.SCROLLBAR_PADDING; var metrics = { viewHeight: viewHeight, viewWidth: viewWidth, contentHeight: optionBox.height * this.workspace_.scale + 2 * this.MARGIN, contentWidth: optionBox.width * this.workspace_.scale + 2 * this.MARGIN, viewTop: -this.workspace_.scrollY + optionBox.y, viewLeft: -this.workspace_.scrollX, contentTop: optionBox.y, contentLeft: optionBox.x, absoluteTop: absoluteTop, absoluteLeft: absoluteLeft }; return metrics; }; /** * Sets the translation of the flyout to match the scrollbars. * @param {!Object} xyRatio Contains a y property which is a float * between 0 and 1 specifying the degree of scrolling and a * similar x property. * @private */ Blockly.VerticalFlyout.prototype.setMetrics_ = function(xyRatio) { var metrics = this.getMetrics_(); // This is a fix to an apparent race condition. 
if (!metrics) { return; } if (goog.isNumber(xyRatio.y)) { this.workspace_.scrollY = -metrics.contentHeight * xyRatio.y; } this.workspace_.translate(this.workspace_.scrollX + metrics.absoluteLeft, this.workspace_.scrollY + metrics.absoluteTop); this.clipRect_.setAttribute('height', metrics.viewHeight + 'px'); this.clipRect_.setAttribute('width', metrics.viewWidth + 'px'); }; /** * Move the flyout to the edge of the workspace. */ Blockly.VerticalFlyout.prototype.position = function() { if (!this.isVisible()) { return; } var targetWorkspaceMetrics = this.targetWorkspace_.getMetrics(); if (!targetWorkspaceMetrics) { // Hidden components will return null. return; } // This version of the flyout does not change width to fit its contents. // Instead it matches the width of its parent or uses a default value. this.width_ = this.getWidth(); if (this.parentToolbox_) { var x = this.parentToolbox_.HtmlDiv.offsetLeft; var y = this.parentToolbox_.HtmlDiv.offsetTop + this.parentToolbox_.getHeight(); } else { var x = this.toolboxPosition_ == Blockly.TOOLBOX_AT_RIGHT ? targetWorkspaceMetrics.viewWidth - this.width_ : 0; var y = 0; } // Record the height for Blockly.Flyout.getMetrics_ this.height_ = targetWorkspaceMetrics.viewHeight - y; this.setBackgroundPath_(this.width_, this.height_); this.svgGroup_.setAttribute("width", this.width_); this.svgGroup_.setAttribute("height", this.height_); var transform = 'translate(' + x + 'px,' + y + 'px)'; Blockly.utils.setCssTransform(this.svgGroup_, transform); // Update the scrollbar (if one exists). if (this.scrollbar_) { // Set the scrollbars origin to be the top left of the flyout. this.scrollbar_.setOrigin(x, y); this.scrollbar_.resize(); } // The blocks need to be visible in order to be laid out and measured // correctly, but we don't want the flyout to show up until it's properly // sized. Opacity is set to zero in show(). this.svgGroup_.style.opacity = 1; }; /** * Create and set the path for the visible boundaries of the flyout. * @param {number} width The width of the flyout, not including the * rounded corners. * @param {number} height The height of the flyout, not including * rounded corners. * @private */ Blockly.VerticalFlyout.prototype.setBackgroundPath_ = function(width, height) { var atRight = this.toolboxPosition_ == Blockly.TOOLBOX_AT_RIGHT; // Decide whether to start on the left or right. var path = ['M ' + 0 + ',0']; // Top. path.push('h', width); // Rounded corner. path.push('a', this.CORNER_RADIUS, this.CORNER_RADIUS, 0, 0, atRight ? 0 : 1, atRight ? -this.CORNER_RADIUS : this.CORNER_RADIUS, this.CORNER_RADIUS); // Side closest to workspace. path.push('v', Math.max(0, height - this.CORNER_RADIUS * 2)); // Rounded corner. path.push('a', this.CORNER_RADIUS, this.CORNER_RADIUS, 0, 0, atRight ? 0 : 1, atRight ? this.CORNER_RADIUS : -this.CORNER_RADIUS, this.CORNER_RADIUS); // Bottom. path.push('h', -width); path.push('z'); this.svgBackground_.setAttribute('d', path.join(' ')); }; /** * Scroll the flyout to the top. */ Blockly.VerticalFlyout.prototype.scrollToStart = function() { this.scrollbar_.set(0); }; /** * Scroll the flyout. * @param {!Event} e Mouse wheel scroll event. * @private */ Blockly.VerticalFlyout.prototype.wheel_ = function(e) { var delta = e.deltaY; if (delta) { if (goog.userAgent.GECKO) { // Firefox's deltas are a tenth that of Chrome/Safari. 
delta *= 10; } var metrics = this.getMetrics_(); var pos = metrics.viewTop + delta; var limit = metrics.contentHeight - metrics.viewHeight; pos = Math.min(pos, limit); pos = Math.max(pos, 0); this.scrollbar_.set(pos); // When the flyout moves from a wheel event, hide WidgetDiv and DropDownDiv. Blockly.WidgetDiv.hide(true); Blockly.DropDownDiv.hideWithoutAnimation(); } // Don't scroll the page. e.preventDefault(); // Don't propagate mousewheel event (zooming). e.stopPropagation(); }; /** * Delete blocks and background buttons from a previous showing of the flyout. * @private */ Blockly.VerticalFlyout.prototype.clearOldBlocks_ = function() { Blockly.VerticalFlyout.superClass_.clearOldBlocks_.call(this); // Do the same for checkboxes. for (var i = 0, elem; elem = this.checkboxes_[i]; i++) { elem.block.flyoutCheckbox = null; goog.dom.removeNode(elem.svgRoot); } this.checkboxes_ = []; }; /** * Add listeners to a block that has been added to the flyout. * @param {Element} root The root node of the SVG group the block is in. * @param {!Blockly.Block} block The block to add listeners for. * @param {!Element} rect The invisible rectangle under the block that acts as * a button for that block. * @private */ Blockly.VerticalFlyout.prototype.addBlockListeners_ = function(root, block, rect) { Blockly.VerticalFlyout.superClass_.addBlockListeners_.call(this, root, block, rect); if (block.flyoutCheckbox) { this.listeners_.push(Blockly.bindEvent_(block.flyoutCheckbox.svgRoot, 'mousedown', null, this.checkboxClicked_(block.flyoutCheckbox))); } }; /** * Lay out the blocks in the flyout. * @param {!Array.<!Object>} contents The blocks and buttons to lay out. * @param {!Array.<number>} gaps The visible gaps between blocks. * @private */ Blockly.VerticalFlyout.prototype.layout_ = function(contents, gaps) { var margin = this.MARGIN; var flyoutWidth = this.getWidth() / this.workspace_.scale; var cursorX = margin; var cursorY = margin; for (var i = 0, item; item = contents[i]; i++) { if (item.type == 'block') { var block = item.block; var allBlocks = block.getDescendants(); for (var j = 0, child; child = allBlocks[j]; j++) { // Mark blocks as being inside a flyout. This is used to detect and // prevent the closure of the flyout if the user right-clicks on such a // block. child.isInFlyout = true; } var root = block.getSvgRoot(); var blockHW = block.getHeightWidth(); // Figure out where the block goes, taking into account its size, whether // we're in RTL mode, and whether it has a checkbox. var oldX = block.getRelativeToSurfaceXY().x; var newX = flyoutWidth - this.MARGIN; var moveX = this.RTL ? newX - oldX : margin; if (block.hasCheckboxInFlyout()) { this.createCheckbox_(block, cursorX, cursorY, blockHW); if (this.RTL) { moveX -= (this.CHECKBOX_SIZE + this.CHECKBOX_MARGIN); } else { moveX += this.CHECKBOX_SIZE + this.CHECKBOX_MARGIN; } } // The block moves a bit extra for the hat, but the block's rectangle // doesn't. That's because the hat actually extends up from 0. block.moveBy(moveX, cursorY + (block.startHat_ ? Blockly.BlockSvg.START_HAT_HEIGHT : 0)); var rect = this.createRect_(block, this.RTL ? moveX - blockHW.width : moveX, cursorY, blockHW, i); this.addBlockListeners_(root, block, rect); cursorY += blockHW.height + gaps[i] + (block.startHat_ ? 
Blockly.BlockSvg.START_HAT_HEIGHT : 0); } else if (item.type == 'button') { var button = item.button; var buttonSvg = button.createDom(); button.moveTo(cursorX, cursorY); button.show(); Blockly.bindEvent_(buttonSvg, 'mouseup', button, button.onMouseUp); this.buttons_.push(button); cursorY += button.height + gaps[i]; } } }; /** * Create and place a rectangle corresponding to the given block. * @param {!Blockly.Block} block The block to associate the rect to. * @param {number} x The x position of the cursor during this layout pass. * @param {number} y The y position of the cursor during this layout pass. * @param {!{height: number, width: number}} blockHW The height and width of the * block. * @param {number} index The index into the background buttons list where this * rect should be placed. * @return {!SVGElement} Newly created SVG element for the rectangle behind the * block. * @private */ Blockly.VerticalFlyout.prototype.createRect_ = function(block, x, y, blockHW, index) { // Create an invisible rectangle under the block to act as a button. Just // using the block as a button is poor, since blocks have holes in them. var rect = Blockly.utils.createSvgElement('rect', { 'fill-opacity': 0, 'x': x, 'y': y, 'height': blockHW.height, 'width': blockHW.width }, null); rect.tooltip = block; Blockly.Tooltip.bindMouseEvents(rect); // Add the rectangles under the blocks, so that the blocks' tooltips work. this.workspace_.getCanvas().insertBefore(rect, block.getSvgRoot()); block.flyoutRect_ = rect; this.backgroundButtons_[index] = rect; return rect; }; /** * Create and place a checkbox corresponding to the given block. * @param {!Blockly.Block} block The block to associate the checkbox to. * @param {number} cursorX The x position of the cursor during this layout pass. * @param {number} cursorY The y position of the cursor during this layout pass. * @param {!{height: number, width: number}} blockHW The height and width of the * block. * @private */ Blockly.VerticalFlyout.prototype.createCheckbox_ = function(block, cursorX, cursorY, blockHW) { var svgRoot = block.getSvgRoot(); var extraSpace = this.CHECKBOX_SIZE + this.CHECKBOX_MARGIN; var width = this.RTL ? this.getWidth() / this.workspace_.scale - extraSpace : cursorX; var height = cursorY + blockHW.height / 2 - this.CHECKBOX_SIZE / 2; var checkboxGroup = Blockly.utils.createSvgElement('g', { 'class': 'blocklyFlyoutCheckbox', 'transform': 'translate(' + width + ', ' + height + ')' }, null); Blockly.utils.createSvgElement('rect', { 'height': this.CHECKBOX_SIZE, 'width': this.CHECKBOX_SIZE, 'rx': this.CHECKBOX_CORNER_RADIUS, 'ry': this.CHECKBOX_CORNER_RADIUS }, checkboxGroup); Blockly.utils.createSvgElement('path', { 'class': 'blocklyFlyoutCheckboxPath', 'd': this.CHECKMARK_PATH }, checkboxGroup); var checkboxObj = {svgRoot: checkboxGroup, clicked: false, block: block}; block.flyoutCheckbox = checkboxObj; this.workspace_.getCanvas().insertBefore(checkboxGroup, svgRoot); this.checkboxes_.push(checkboxObj); }; /** * Respond to a click on a checkbox in the flyout. * @param {!Object} checkboxObj An object containing the svg element of the * checkbox, a boolean for the state of the checkbox, and the block the * checkbox is associated with. * @return {!Function} Function to call when checkbox is clicked. 
* @private */ Blockly.VerticalFlyout.prototype.checkboxClicked_ = function(checkboxObj) { return function(e) { checkboxObj.clicked = !checkboxObj.clicked; if (checkboxObj.clicked) { Blockly.utils.addClass((checkboxObj.svgRoot), 'checked'); } else { Blockly.utils.removeClass((checkboxObj.svgRoot), 'checked'); } // This event has been handled. No need to bubble up to the document. e.stopPropagation(); e.preventDefault(); }; }; /** * Explicitly set the clicked state of the checkbox for the given block. * @param {string} blockId ID of block whose checkbox should be changed. * @param {boolean} clicked True if the box should be marked clicked. */ Blockly.VerticalFlyout.prototype.setCheckboxState = function(blockId, clicked) { var block = this.workspace_.getBlockById(blockId); if (!block) { throw 'No block found in the flyout for id ' + blockId; } var checkboxObj = block.flyoutCheckbox; checkboxObj.clicked = clicked; if (checkboxObj.clicked) { Blockly.addClass_((checkboxObj.svgRoot), 'checked'); } else { Blockly.removeClass_((checkboxObj.svgRoot), 'checked'); } }; /** * Handle a mouse-move to vertically drag the flyout. * @param {!Event} e Mouse move event. * @private */ Blockly.VerticalFlyout.prototype.onMouseMove_ = function(e) { var metrics = this.getMetrics_(); if (metrics.contentHeight - metrics.viewHeight < 0) { return; } var dy = e.clientY - this.startDragMouseY_; this.startDragMouseY_ = e.clientY; var y = metrics.viewTop - dy; y = goog.math.clamp(y, 0, metrics.contentHeight - metrics.viewHeight); this.scrollbar_.set(y); }; /** * Determine if a drag delta is toward the workspace, based on the position * and orientation of the flyout. This is used in determineDragIntention_ to * determine if a new block should be created or if the flyout should scroll. * @param {number} dx X delta of the drag. * @param {number} dy Y delta of the drag. * @return {boolean} true if the drag is toward the workspace. * @private */ Blockly.VerticalFlyout.prototype.isDragTowardWorkspace_ = function(dx, dy) { // Direction goes from -180 to 180, with 0 toward the right and 90 on top. var dragDirection = Math.atan2(dy, dx) / Math.PI * 180; var draggingTowardWorkspace = false; var range = this.dragAngleRange_; if (this.toolboxPosition_ == Blockly.TOOLBOX_AT_LEFT) { // Vertical at left. if (dragDirection < range && dragDirection > -range) { draggingTowardWorkspace = true; } } else { // Vertical at right. if (dragDirection < -180 + range || dragDirection > 180 - range) { draggingTowardWorkspace = true; } } return draggingTowardWorkspace; }; /** * Copy a block from the flyout to the workspace and position it correctly. * @param {!Blockly.Block} originBlock The flyout block to copy. * @return {!Blockly.Block} The new block in the main workspace. * @private */ Blockly.VerticalFlyout.prototype.placeNewBlock_ = function(originBlock) { var targetWorkspace = this.targetWorkspace_; var svgRootOld = originBlock.getSvgRoot(); if (!svgRootOld) { throw 'originBlock is not rendered.'; } // Figure out where the original block is on the screen, relative to the upper // left corner of the main workspace. // In what coordinates? Pixels? var xyOld = Blockly.utils.getInjectionDivXY_(svgRootOld); // Take into account that the flyout might have been scrolled horizontally // (separately from the main workspace). // Generally a no-op in vertical mode but likely to happen in horizontal // mode. 
// var scrollX = this.workspace_.scrollX; var scale = this.workspace_.scale; // xyOld.x += scrollX / scale - scrollX; var targetMetrics = targetWorkspace.getMetrics(); // If the flyout is on the right side, (0, 0) in the flyout is offset to // the right of (0, 0) in the main workspace. Add an offset to take that // into account. var scrollX = 0; if (this.toolboxPosition_ == Blockly.TOOLBOX_AT_RIGHT) { scrollX = targetMetrics.viewWidth - this.width_; // Scale the scroll (getSvgXY_ did not do this). xyOld.x += scrollX / scale - scrollX; } // The main workspace has 0,0 at the top inside corner of the toolbox. // Need to take that into account now that the flyout is offset from there in // both directions. if (this.parentToolbox_) { // TODO (fenichel): fix these offsets to correctly deal with scaling // changes. xyOld.y += (this.parentToolbox_.getHeight()) / targetWorkspace.scale - (this.parentToolbox_.getHeight()); var xOffset = this.parentToolbox_.getWidth() / targetWorkspace.scale - this.parentToolbox_.getWidth(); if (this.toolboxPosition_ == Blockly.TOOLBOX_AT_RIGHT) { xyOld.x += xOffset; } else { xyOld.x -= xOffset; } } // Take into account that the flyout might have been scrolled vertically // (separately from the main workspace). var scrollY = this.workspace_.scrollY; xyOld.y += scrollY / scale - scrollY; // Create the new block by cloning the block in the flyout (via XML). var xml = Blockly.Xml.blockToDom(originBlock); // The target workspace would normally resize during domToBlock, which will // lead to weird jumps. Save it for terminateDrag. targetWorkspace.setResizesEnabled(false); var block = Blockly.Xml.domToBlock(xml, targetWorkspace); var svgRootNew = block.getSvgRoot(); if (!svgRootNew) { throw 'block is not rendered.'; } // Figure out where the new block got placed on the screen, relative to the // upper left corner of the workspace. This may not be the same as the // original block because the flyout's origin may not be the same as the // main workspace's origin. var xyNew = Blockly.utils.getInjectionDivXY_(svgRootNew); // Scale the scroll (getSvgXY_ did not do this). xyNew.x += targetWorkspace.scrollX / targetWorkspace.scale - targetWorkspace.scrollX; xyNew.y += targetWorkspace.scrollY / targetWorkspace.scale - targetWorkspace.scrollY; // Move the new block to where the old block is. var dx = ((scale * xyOld.x) - (targetWorkspace.scale * xyNew.x)) / targetWorkspace.scale; var dy = ((scale * xyOld.y) - (targetWorkspace.scale * xyNew.y)) / targetWorkspace.scale; block.moveBy(dx, dy); return block; }; /** * Return the deletion rectangle for this flyout in viewport coordinates. * @return {goog.math.Rect} Rectangle in which to delete. */ Blockly.VerticalFlyout.prototype.getClientRect = function() { if (!this.svgGroup_) { return null; } var flyoutRect = this.svgGroup_.getBoundingClientRect(); // BIG_NUM is offscreen padding so that blocks dragged beyond the shown flyout // area are still deleted. Must be larger than the largest screen size, // but be smaller than half Number.MAX_SAFE_INTEGER (not available on IE). var BIG_NUM = 1000000000; var x = flyoutRect.left; var width = flyoutRect.width; if (this.toolboxPosition_ == Blockly.TOOLBOX_AT_LEFT) { return new goog.math.Rect(x - BIG_NUM, -BIG_NUM, BIG_NUM + width, BIG_NUM * 2); } else { // Right return new goog.math.Rect(x, -BIG_NUM, BIG_NUM + width, BIG_NUM * 2); } }; /** * Compute width of flyout. Position button under each block. * For RTL: Lay out the blocks right-aligned. 
* @param {!Array<!Blockly.Block>} blocks The blocks to reflow. */ Blockly.VerticalFlyout.prototype.reflowInternal_ = function(/* blocks */) { // This is a no-op because the flyout is a fixed size. return; };
1
8,294
Can you explain why you went from a positive value (`metrics.viewTop`) to a negative value (`-this.workspace_.scrollY`)?
LLK-scratch-blocks
js
@@ -91,4 +91,11 @@ public interface ExternalTaskRestService { @Produces(MediaType.APPLICATION_JSON) BatchDto setRetriesAsync(SetRetriesForExternalTasksDto retriesDto); + @GET + @Path("/topic-names") + @Produces(MediaType.APPLICATION_JSON) + List<String> getTopicNames(@QueryParam("withLockedTasks") boolean withLockedTasks, + @QueryParam("withUnlockedTasks") boolean withUnlockedTasks, + @QueryParam("withRetriesLeft") boolean withRetriesLeft); + }
1
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.rest; import java.util.List; import javax.ws.rs.Consumes; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.UriInfo; import org.camunda.bpm.engine.rest.dto.CountResultDto; import org.camunda.bpm.engine.rest.dto.batch.BatchDto; import org.camunda.bpm.engine.rest.dto.externaltask.ExternalTaskDto; import org.camunda.bpm.engine.rest.dto.externaltask.ExternalTaskQueryDto; import org.camunda.bpm.engine.rest.dto.externaltask.FetchExternalTasksDto; import org.camunda.bpm.engine.rest.dto.externaltask.LockedExternalTaskDto; import org.camunda.bpm.engine.rest.dto.externaltask.SetRetriesForExternalTasksDto; import org.camunda.bpm.engine.rest.sub.externaltask.ExternalTaskResource; /** * @author Thorben Lindhauer * */ public interface ExternalTaskRestService { public static final String PATH = "/external-task"; @GET @Produces(MediaType.APPLICATION_JSON) List<ExternalTaskDto> getExternalTasks(@Context UriInfo uriInfo, @QueryParam("firstResult") Integer firstResult, @QueryParam("maxResults") Integer maxResults); @POST @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) List<ExternalTaskDto> queryExternalTasks(ExternalTaskQueryDto query, @QueryParam("firstResult") Integer firstResult, @QueryParam("maxResults") Integer maxResults); @GET @Path("/count") @Produces(MediaType.APPLICATION_JSON) CountResultDto getExternalTasksCount(@Context UriInfo uriInfo); @POST @Path("/count") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) CountResultDto queryExternalTasksCount(ExternalTaskQueryDto query); @POST @Path("/fetchAndLock") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) List<LockedExternalTaskDto> fetchAndLock(FetchExternalTasksDto fetchingDto); @Path("/{id}") ExternalTaskResource getExternalTask(@PathParam("id") String externalTaskId); @PUT @Path("/retries") @Consumes(MediaType.APPLICATION_JSON) void setRetries(SetRetriesForExternalTasksDto retriesDto); @POST @Path("/retries-async") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) BatchDto setRetriesAsync(SetRetriesForExternalTasksDto retriesDto); }
1
10,227
Let's use a dedicated DTO instead of `List<String>` to remain consistent with all other existing REST API endpoints (see the sketch after this list):
* Introduce a new DTO class (e.g. `ExternalTaskTopicNameDto`) located under `org.camunda.bpm.engine.rest.dto.externaltask`
* The class should have the attribute `topicName` of type `String`
* Introduce a getter & setter for the attribute
* Convert the list of strings to a list of `ExternalTaskTopicNameDto`s
* Change the method signature to return `List<ExternalTaskTopicNameDto>`
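A minimal sketch of the suggested DTO. Only the class name, package, attribute, and accessors come from the review above; the constructor and the static conversion helper are illustrative additions.

```java
package org.camunda.bpm.engine.rest.dto.externaltask;

import java.util.ArrayList;
import java.util.List;

public class ExternalTaskTopicNameDto {

  protected String topicName;

  public ExternalTaskTopicNameDto() {
  }

  public ExternalTaskTopicNameDto(String topicName) {
    this.topicName = topicName;
  }

  public String getTopicName() {
    return topicName;
  }

  public void setTopicName(String topicName) {
    this.topicName = topicName;
  }

  // Convert the service-layer list of topic names into DTOs so the
  // endpoint can return List<ExternalTaskTopicNameDto>.
  public static List<ExternalTaskTopicNameDto> fromTopicNames(List<String> topicNames) {
    List<ExternalTaskTopicNameDto> dtos = new ArrayList<>();
    for (String topicName : topicNames) {
      dtos.add(new ExternalTaskTopicNameDto(topicName));
    }
    return dtos;
  }
}
```

The `getTopicNames` endpoint in `ExternalTaskRestService` would then declare a return type of `List<ExternalTaskTopicNameDto>` instead of `List<String>`.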
camunda-camunda-bpm-platform
java
@@ -42,6 +42,14 @@ public interface RepositoryManager { */ ScriptDTO getScript(List<String> path); + /** + * This method returns the {@link org.phoenicis.repository.dto.ScriptDTO} with the given ID + * + * @param id The script ID + * @return The found ScriptDTO + */ + ScriptDTO getScript(String id); + /** * This method moves the repository, belonging to the given repository url, to the given index. * This is done by swapping the current content at the given index with old index of the given repository url
1
package org.phoenicis.repository; import org.phoenicis.repository.dto.ApplicationDTO; import org.phoenicis.repository.dto.RepositoryDTO; import org.phoenicis.repository.location.RepositoryLocation; import org.phoenicis.repository.dto.ScriptDTO; import org.phoenicis.repository.types.Repository; import java.util.List; import java.util.function.Consumer; /** * This Interface contains all methods a RepositoryManager must implement. * * @author marc * @since 07.04.17 */ public interface RepositoryManager { /** * This method adds a corresponding pair of callbacks to this repository manager * * @param onRepositoryChange The callback that should be called with the new RepositoryDTO when the repository * change succeeded * @param onError The callback that should be called when the repository change failed */ void addCallbacks(Consumer<RepositoryDTO> onRepositoryChange, Consumer<Exception> onError); /** * This method returns the {@link org.phoenicis.repository.dto.ApplicationDTO}, which can be found at the given * path. * * @param path The path, where the searched ApplicationDTO can be found * @return The found ApplicationDTO */ ApplicationDTO getApplication(List<String> path); /** * This method returns the {@link org.phoenicis.repository.dto.ScriptDTO}, which can be found at the given path * * @param path The path, where the searched ScriptDTO can be found * @return The found ScriptDTO */ ScriptDTO getScript(List<String> path); /** * This method moves the repository, belonging to the given repository url, to the given index. * This is done by swapping the current content at the given index with old index of the given repository url * After this method has been called {@link #triggerRepositoryChange()} will be called once. * * @param repositoryUrl The repository url belonging to the repository that should be moved to @param toIndex * @param toIndex The index, to which the repository should be moved */ void moveRepository(RepositoryLocation<? extends Repository> repositoryUrl, int toIndex); /** * This method updates the repositories list maintained by this {@link RepositoryManager} * * @param repositoryLocations The new repository locations */ void updateRepositories(final List<RepositoryLocation<? extends Repository>> repositoryLocations); /** * This method adds a number of given repositories to this manager. This is done by inserting the repositories at * the given position. * After this method has been called {@link #triggerRepositoryChange()} will be called once. * * @param index The start position, where the repositories should be added * @param repositoryUrls An array containing the urls to the to be added repositories */ void addRepositories(int index, RepositoryLocation<? extends Repository>... repositoryUrls); /** * This method adds a number of given repositories to this manager. This is done by appending the repositories at * the end, which makes them the lowest priority. * After this method has been called {@link #triggerRepositoryChange()} will be called once. * * @param repositoryUrls An array containing the urls to the to be added repositories */ void addRepositories(RepositoryLocation<? extends Repository>... repositoryUrls); /** * This method removes the repositories belonging to the given array of repository urls from this manager. * After this method has been called {@link #triggerRepositoryChange()} will be called once. * * @param repositoryUrls An array containing the urls of the to be removed repositories. */ void removeRepositories(RepositoryLocation<? extends Repository>... 
repositoryUrls); /** * This method will fetch a new list of {@link org.phoenicis.repository.dto.CategoryDTO}s from the managed * repositories. * After the new category dtos have been fetched, this method will call the previously added onRepositoryChange * callbacks with the newly fetched category dtos. * If an error appeared, the onError callbacks will be called, with the error. */ void triggerRepositoryChange(); /** * This method triggers all registered callbacks. * In contrast to {@link #triggerRepositoryChange()}, it does not update the repository before. */ void triggerCallbacks(); }
1
13,335
Do you plan to remove the `ScriptDTO getScript(List<String> path);` method long-term?
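Purely illustrative and not part of this PR: if script IDs were dot-separated repository paths, the new overload could delegate to the existing path-based lookup, letting both methods coexist. The ID format and the default-method placement are assumptions.

```java
import java.util.Arrays;
import java.util.List;

import org.phoenicis.repository.dto.ScriptDTO;

public interface RepositoryManagerSketch {

  // Existing path-based lookup (as in the interface above).
  ScriptDTO getScript(List<String> path);

  // Hypothetical bridge: assumes an ID like
  // "applications.games.example.installer" maps onto the path segments.
  default ScriptDTO getScript(String id) {
    return getScript(Arrays.asList(id.split("\\.")));
  }
}
```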
PhoenicisOrg-phoenicis
java
@@ -134,12 +134,14 @@ }, types: function() { var self = this; - var result = this.allTypes.filter(function(item) { + if (this.enabledTypes && !this.enabledTypes.length) { + return this.allTypes; + } + return this.allTypes.filter(function(item) { return self.enabledTypes.some(function(enabledItem) { return enabledItem === item.value; }); }); - return result; } } });
1
/*global countlyVue, CV, Vue */ (function() { /** * DRAWER HELPERS */ var MetricComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/metric.html'), props: { dataType: { type: String }, multiple: { type: Boolean, default: false }, multipleLimit: { type: Number, default: 3 }, placeholder: { type: String }, value: { type: Array } }, data: function() { return { metrics: { session: [ { label: this.i18n("sidebar.analytics.sessions"), value: "t" }, { label: this.i18n("sidebar.analytics.users"), value: "u" }, { label: this.i18n("common.table.new-users"), value: "n" } ], event: [ { label: this.i18n("events.table.count"), value: "c" }, { label: this.i18n("events.table.sum"), value: "s" }, { label: this.i18n("events.table.dur"), value: "dur" } ], push: [ { label: this.i18n("dashboards.sent"), value: "sent" }, { label: this.i18n("dashboards.actioned"), value: "actioned" } ], crash: [ { label: this.i18n("dashboards.crf"), value: "crf" }, { label: this.i18n("dashboards.crnf"), value: "crnf" }, { label: this.i18n("dashboards.cruf"), value: "cruf" }, { label: this.i18n("dashboards.crunf"), value: "crunf" } ] } }; }, computed: { selectedMetrics: function() { return this.metrics[this.dataType]; }, placeholderText: function() { if (this.placeholder) { return this.placeholder; } if (this.multiple) { return this.i18n("placeholder.dashboards.select-metric-multi", this.multipleLimit); } else { return this.i18n("placeholder.dashboards.select-metric-single"); } }, val: function() { if (!this.multiple) { return this.value && this.value[0] || ""; } return this.value; } }, methods: { change: function(item) { var i = item; if (!this.multiple) { i = [item]; } this.$emit("input", i); } } }); var DataTypeComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/data-type.html'), props: { placeholder: { type: String }, enabledTypes: { type: Array, default: function() { return []; } } }, data: function() { return { allTypes: [ { value: "session", label: this.i18n("dashboards.session") }, { value: "event", label: this.i18n("dashboards.event") }, { value: "push", label: this.i18n("dashboards.push") }, { value: "crash", label: this.i18n("dashboards.crash") }, ] }; }, computed: { placeholderText: function() { if (this.placeholder) { return this.placeholder; } return this.i18n("placeholder.dashbaords.select-data-type"); }, types: function() { var self = this; var result = this.allTypes.filter(function(item) { return self.enabledTypes.some(function(enabledItem) { return enabledItem === item.value; }); }); return result; } } }); var AppCountComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/app-count.html'), props: { apps: { type: Array, default: [] } }, data: function() { return { count: null }; }, computed: { appCount: { get: function() { if (!this.count) { return (this.apps.length > 1) ? 
"multiple" : "single"; } return this.count; }, set: function(v) { this.count = v; } } } }); /** * Source app component returns the selected apps in an array even if single app is selected */ var SourceAppsComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/source-apps.html'), props: { multiple: { type: Boolean, default: false }, multipleLimit: { type: Number, default: 4 }, placeholder: { type: String }, value: { type: Array } }, data: function() { return {}; }, computed: { placeholderText: function() { if (this.placeholder) { return this.placeholder; } if (this.multiple) { return this.i18n("placeholder.dashboards.select-applications-multi", this.multipleLimit); } else { return this.i18n("placeholder.dashboards.select-applications-single"); } }, selectedApps: { get: function() { if (!this.multiple) { return this.value && this.value[0] || ""; } return this.value; }, set: function(item) { var i = item; if (!this.multiple) { i = [item]; } this.$emit("input", i); } } } }); var VisualizationComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/visualization.html'), props: { extraTypes: { type: Array, default: function() { return []; } }, enabledTypes: { type: Array, default: null }, value: String }, data: function() { return { types: [ { value: "time-series", label: this.i18n("dashboards.visualization.time-series") }, { value: "bar-chart", label: this.i18n("dashboards.visualization.bar-chart") }, { value: "number", label: this.i18n("dashboards.visualization.number") }, { value: "table", label: this.i18n("dashboards.visualization.table") }, ] }; }, computed: { visualizationTypes: function() { var fullList = this.types.concat(this.extraTypes); fullList.sort(function(a, b) { return (a.priority || 0) - (b.priority || 0); }); if (this.enabledTypes) { var self = this; return fullList.filter(function(item) { return self.enabledTypes.includes(item.value); }); } return fullList; }, selectedType: function() { return this.value; }, isSelected: function() { return this.selectedType ? 
true : false; } }, methods: { onClick: function(item) { this.$emit("input", item.value); } } }); var TitleComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/drawer/title.html'), props: { value: {type: String} }, data: function() { return { titleCheckbox: null }; }, computed: { title: { get: function() { return this.value; }, set: function(t) { this.$emit("input", t); } }, checkbox: { get: function() { if (this.titleCheckbox !== null) { return this.titleCheckbox; } if (this.value && this.value.length) { return true; } return false; }, set: function(v) { if (v === false && this.value && this.value.length) { this.$emit("input", ""); } this.titleCheckbox = v; } } } }); /** * WIDGET HELPERS */ var BucketComponent = countlyVue.views.create({ template: CV.T('/dashboards/templates/helpers/widget/bucket.html'), props: { widgetId: {type: String, required: true}, value: {type: String, required: true} }, data: function() { return { allBuckets: [ { value: "daily", label: this.i18nM("drill.daily") }, { value: "weekly", label: this.i18nM("drill.weekly") }, { value: "monthly", label: this.i18nM("drill.monthly") } ] }; }, computed: { val: function() { return this.value; } }, methods: { onChange: function(b) { var self = this; this.$store.dispatch("countlyDashboards/widgets/update", {id: this.widgetId, settings: {"bucket": b}}).then(function() { self.$store.dispatch("countlyDashboards/widgets/get", self.widgetId); }); this.$emit("input", b); } } }); // var AppsMixin = { // methods: { // getAppname: function(appId) { // var selected = this.$store.getters["countlyDashboards/selected"]; // var dash = selected.data || {}; // var dashboardApps = dash.apps || []; // var appName = "Unknown"; // var appObj = dashboardApps.find(function(app) { // return app._id === appId; // }); // if (appObj && appObj.name) { // appName = appObj.name; // } // else if (countlyGlobal.apps[appId]) { // appName = countlyGlobal.apps[appId].name; // } // return appName; // } // } // }; /** * DRAWER HELPERS REGISTRATION */ Vue.component("clyd-metric", MetricComponent); Vue.component("clyd-datatype", DataTypeComponent); Vue.component("clyd-appcount", AppCountComponent); Vue.component("clyd-sourceapps", SourceAppsComponent); Vue.component("clyd-visualization", VisualizationComponent); Vue.component("clyd-title", TitleComponent); /** * WIDGET HELPERS REGISTRATION */ Vue.component("clyd-bucket", BucketComponent); })();
1
14,870
@itsiprikshit I used a custom v-model for the app count component because the source apps component was not reacting to the user's app count selection: whenever the user changed the app count, the multiplicity of the source apps selection stayed the same, so the user could in fact only select a single application. Please let me know if you have any thoughts on this.
Countly-countly-server
js
@@ -4,6 +4,7 @@ from mmcv.cnn import ConvModule, kaiming_init from mmcv.runner import auto_fp16, force_fp32 from mmdet.models.builder import HEADS +from mmdet.models.utils import ResLayer, SimplifiedBasicBlock @HEADS.register_module()
1
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, kaiming_init from mmcv.runner import auto_fp16, force_fp32 from mmdet.models.builder import HEADS @HEADS.register_module() class FusedSemanticHead(nn.Module): r"""Multi-level fused semantic segmentation head. .. code-block:: none in_1 -> 1x1 conv --- | in_2 -> 1x1 conv -- | || in_3 -> 1x1 conv - || ||| /-> 1x1 conv (mask prediction) in_4 -> 1x1 conv -----> 3x3 convs (*4) | \-> 1x1 conv (feature) in_5 -> 1x1 conv --- """ # noqa: W605 def __init__(self, num_ins, fusion_level, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2, conv_cfg=None, norm_cfg=None): super(FusedSemanticHead, self).__init__() self.num_ins = num_ins self.fusion_level = fusion_level self.num_convs = num_convs self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.num_classes = num_classes self.ignore_label = ignore_label self.loss_weight = loss_weight self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self.lateral_convs = nn.ModuleList() for i in range(self.num_ins): self.lateral_convs.append( ConvModule( self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = self.in_channels if i == 0 else conv_out_channels self.convs.append( ConvModule( in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_embedding = ConvModule( conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) def init_weights(self): kaiming_init(self.conv_logits) @auto_fp16() def forward(self, feats): x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) fused_size = tuple(x.shape[-2:]) for i, feat in enumerate(feats): if i != self.fusion_level: feat = F.interpolate( feat, size=fused_size, mode='bilinear', align_corners=True) x += self.lateral_convs[i](feat) for i in range(self.num_convs): x = self.convs[i](x) mask_pred = self.conv_logits(x) x = self.conv_embedding(x) return mask_pred, x @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, labels): labels = labels.squeeze(1).long() loss_semantic_seg = self.criterion(mask_pred, labels) loss_semantic_seg *= self.loss_weight return loss_semantic_seg
1
22,116
Similarly, we think we can keep `fused_semantic_head.py` unchanged and instead add a new mask head for the desired functionality.
open-mmlab-mmdetection
py
@@ -175,4 +175,10 @@ public class PhpModelTypeNameConverter implements ModelTypeNameConverter { private static String getPhpPackage(ProtoFile file) { return file.getProto().getPackage().replaceAll("\\.", "\\\\"); } + + @Override + public TypeName getTypeNameForTypedResourceName( + ProtoElement elem, String typedResourceShortName) { + throw new UnsupportedOperationException("getTypeNameForTypedResourceName not supported by PHP"); + } }
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.php; import com.google.api.codegen.transformer.ModelTypeNameConverter; import com.google.api.codegen.util.TypeName; import com.google.api.codegen.util.TypeNameConverter; import com.google.api.codegen.util.TypedValue; import com.google.api.codegen.util.php.PhpTypeTable; import com.google.api.tools.framework.model.EnumValue; import com.google.api.tools.framework.model.ProtoElement; import com.google.api.tools.framework.model.ProtoFile; import com.google.api.tools.framework.model.TypeRef; import com.google.common.collect.ImmutableMap; import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type; public class PhpModelTypeNameConverter implements ModelTypeNameConverter { /** * A map from primitive types in proto to PHP counterparts. */ private static final ImmutableMap<Type, String> PRIMITIVE_TYPE_MAP = ImmutableMap.<Type, String>builder() .put(Type.TYPE_BOOL, "bool") .put(Type.TYPE_DOUBLE, "float") .put(Type.TYPE_FLOAT, "float") .put(Type.TYPE_INT64, "int") .put(Type.TYPE_UINT64, "int") .put(Type.TYPE_SINT64, "int") .put(Type.TYPE_FIXED64, "int") .put(Type.TYPE_SFIXED64, "int") .put(Type.TYPE_INT32, "int") .put(Type.TYPE_UINT32, "int") .put(Type.TYPE_SINT32, "int") .put(Type.TYPE_FIXED32, "int") .put(Type.TYPE_SFIXED32, "int") .put(Type.TYPE_STRING, "string") .put(Type.TYPE_BYTES, "string") .build(); /** * A map from primitive types in proto to zero value in PHP */ private static final ImmutableMap<Type, String> PRIMITIVE_ZERO_VALUE = ImmutableMap.<Type, String>builder() .put(Type.TYPE_BOOL, "false") .put(Type.TYPE_DOUBLE, "0.0") .put(Type.TYPE_FLOAT, "0.0") .put(Type.TYPE_INT64, "0") .put(Type.TYPE_UINT64, "0") .put(Type.TYPE_SINT64, "0") .put(Type.TYPE_FIXED64, "0") .put(Type.TYPE_SFIXED64, "0") .put(Type.TYPE_INT32, "0") .put(Type.TYPE_UINT32, "0") .put(Type.TYPE_SINT32, "0") .put(Type.TYPE_FIXED32, "0") .put(Type.TYPE_SFIXED32, "0") .put(Type.TYPE_STRING, "\"\"") .put(Type.TYPE_BYTES, "\"\"") .build(); private TypeNameConverter typeNameConverter; public PhpModelTypeNameConverter(String implicitPackageName) { this.typeNameConverter = new PhpTypeTable(implicitPackageName); } @Override public TypeName getTypeName(TypeRef type) { if (type.isMap()) { return new TypeName("array"); } else if (type.isRepeated()) { TypeName elementTypeName = getTypeNameForElementType(type); return new TypeName("", "", "%i[]", elementTypeName); } else { return getTypeNameForElementType(type); } } /** * Returns the PHP representation of a type, without cardinality. If the type is a primitive, * getTypeNameForElementType returns it in unboxed form. */ @Override public TypeName getTypeNameForElementType(TypeRef type) { String primitiveTypeName = PRIMITIVE_TYPE_MAP.get(type.getKind()); if (primitiveTypeName != null) { if (primitiveTypeName.contains("\\")) { // Fully qualified type name, use regular type name resolver. Can skip boxing logic // because those types are already boxed. 
return typeNameConverter.getTypeName(primitiveTypeName); } else { return new TypeName(primitiveTypeName); } } switch (type.getKind()) { case TYPE_MESSAGE: return getTypeName(type.getMessageType()); case TYPE_ENUM: return getTypeName(type.getEnumType()); default: throw new IllegalArgumentException("unknown type kind: " + type.getKind()); } } @Override public TypeName getTypeName(ProtoElement elem) { return typeNameConverter.getTypeName(elem.getFullName().replaceAll("\\.", "\\\\")); } /** * Returns the PHP representation of a zero value for that type, to be used in code sample doc. */ @Override public TypedValue getZeroValue(TypeRef type) { // Don't call getTypeName; we don't need to import these. if (type.isMap()) { return TypedValue.create(new TypeName("array"), "[]"); } if (type.isRepeated()) { return TypedValue.create(new TypeName("array"), "[]"); } if (PRIMITIVE_ZERO_VALUE.containsKey(type.getKind())) { return TypedValue.create(getTypeName(type), PRIMITIVE_ZERO_VALUE.get(type.getKind())); } if (type.isMessage()) { return TypedValue.create(getTypeName(type), "new %s()"); } if (type.isEnum()) { EnumValue enumValue = type.getEnumType().getValues().get(0); return TypedValue.create(getTypeName(type), "%s::" + enumValue.getSimpleName()); } return TypedValue.create(new TypeName(""), "null"); } @Override public String renderPrimitiveValue(TypeRef type, String value) { Type primitiveType = type.getKind(); if (!PRIMITIVE_TYPE_MAP.containsKey(primitiveType)) { throw new IllegalArgumentException( "Initial values are only supported for primitive types, got type " + type + ", with value " + value); } switch (primitiveType) { case TYPE_BOOL: return value.toLowerCase(); case TYPE_STRING: case TYPE_BYTES: return "\"" + value + "\""; default: // Types that do not need to be modified (e.g. TYPE_INT32) are handled // here return value; } } /** * Gets the PHP package for the given proto file. */ private static String getPhpPackage(ProtoFile file) { return file.getProto().getPackage().replaceAll("\\.", "\\\\"); } }
1
17,731
Why implement this? It duplicates the value in FeatureConfig.
googleapis-gapic-generator
java
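A note on the record above: the review objects to adding a converter method whose only behavior is to throw, when a FeatureConfig flag already records that PHP has no typed resource names. The sketch below is hypothetical (the interface and method names are illustrative, not the actual gapic-generator API) and only shows the pattern the reviewer is pointing at: callers branch on the capability flag, so a duplicate throwing stub adds nothing.

```java
// Hypothetical sketch, not the real gapic-generator API: a capability flag
// consulted before the converter is ever asked for a typed resource name.
interface FeatureConfig {
  boolean resourceNameTypesEnabled(); // illustrative name
}

class SurfaceTransformer {
  private final FeatureConfig featureConfig;

  SurfaceTransformer(FeatureConfig featureConfig) {
    this.featureConfig = featureConfig;
  }

  String resourceTypeName(String typedName, String plainName) {
    // With callers guarded like this, a converter stub that unconditionally
    // throws UnsupportedOperationException duplicates what the flag already says.
    return featureConfig.resourceNameTypesEnabled() ? typedName : plainName;
  }
}
```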
@@ -159,8 +159,14 @@ public class SparkScanBuilder implements ScanBuilder, SupportsPushDownFilters, S @Override public Scan build() { - return new SparkBatchQueryScan( - spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options); + // TODO: understand how to differentiate that this is a spark streaming microbatch scan. + if (false) { + return new SparkBatchQueryScan( + spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options); + } else { + return new SparkMicroBatchStreamScan( + spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options); + } } public Scan buildMergeScan() {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.iceberg.MetadataColumns; import org.apache.iceberg.Schema; import org.apache.iceberg.Table; import org.apache.iceberg.exceptions.ValidationException; import org.apache.iceberg.expressions.Binder; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.spark.SparkFilters; import org.apache.iceberg.spark.SparkSchemaUtil; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.connector.read.Scan; import org.apache.spark.sql.connector.read.ScanBuilder; import org.apache.spark.sql.connector.read.SupportsPushDownFilters; import org.apache.spark.sql.connector.read.SupportsPushDownRequiredColumns; import org.apache.spark.sql.sources.Filter; import org.apache.spark.sql.types.StructField; import org.apache.spark.sql.types.StructType; import org.apache.spark.sql.util.CaseInsensitiveStringMap; public class SparkScanBuilder implements ScanBuilder, SupportsPushDownFilters, SupportsPushDownRequiredColumns { private static final Filter[] NO_FILTERS = new Filter[0]; private final SparkSession spark; private final Table table; private final CaseInsensitiveStringMap options; private final List<String> metaColumns = Lists.newArrayList(); private Schema schema = null; private StructType requestedProjection; private boolean caseSensitive; private List<Expression> filterExpressions = null; private Filter[] pushedFilters = NO_FILTERS; private boolean ignoreResiduals = false; SparkScanBuilder(SparkSession spark, Table table, CaseInsensitiveStringMap options) { this.spark = spark; this.table = table; this.options = options; this.caseSensitive = Boolean.parseBoolean(spark.conf().get("spark.sql.caseSensitive")); } private Schema lazySchema() { if (schema == null) { if (requestedProjection != null) { // the projection should include all columns that will be returned, including those only used in filters this.schema = SparkSchemaUtil.prune(table.schema(), requestedProjection, filterExpression(), caseSensitive); } else { this.schema = table.schema(); } } return schema; } private Expression filterExpression() { if (filterExpressions != null) { return filterExpressions.stream().reduce(Expressions.alwaysTrue(), Expressions::and); } return Expressions.alwaysTrue(); } public SparkScanBuilder withMetadataColumns(String... 
metadataColumns) { Collections.addAll(metaColumns, metadataColumns); return this; } public SparkScanBuilder caseSensitive(boolean isCaseSensitive) { this.caseSensitive = isCaseSensitive; return this; } @Override public Filter[] pushFilters(Filter[] filters) { List<Expression> expressions = Lists.newArrayListWithExpectedSize(filters.length); List<Filter> pushed = Lists.newArrayListWithExpectedSize(filters.length); for (Filter filter : filters) { Expression expr = SparkFilters.convert(filter); if (expr != null) { try { Binder.bind(table.schema().asStruct(), expr, caseSensitive); expressions.add(expr); pushed.add(filter); } catch (ValidationException e) { // binding to the table schema failed, so this expression cannot be pushed down } } } this.filterExpressions = expressions; this.pushedFilters = pushed.toArray(new Filter[0]); // Spark doesn't support residuals per task, so return all filters // to get Spark to handle record-level filtering return filters; } @Override public Filter[] pushedFilters() { return pushedFilters; } @Override public void pruneColumns(StructType requestedSchema) { this.requestedProjection = new StructType(Stream.of(requestedSchema.fields()) .filter(field -> MetadataColumns.nonMetadataColumn(field.name())) .toArray(StructField[]::new)); Stream.of(requestedSchema.fields()) .map(StructField::name) .filter(MetadataColumns::isMetadataColumn) .distinct() .forEach(metaColumns::add); } public SparkScanBuilder ignoreResiduals() { this.ignoreResiduals = true; return this; } private Schema schemaWithMetadataColumns() { // metadata columns List<Types.NestedField> fields = metaColumns.stream() .distinct() .map(MetadataColumns::get) .collect(Collectors.toList()); Schema meta = new Schema(fields); // schema or rows returned by readers return TypeUtil.join(lazySchema(), meta); } @Override public Scan build() { return new SparkBatchQueryScan( spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options); } public Scan buildMergeScan() { return new SparkMergeScan( spark, table, caseSensitive, ignoreResiduals, schemaWithMetadataColumns(), filterExpressions, options); } }
1
37,711
@aokolnychyi / @RussellSpitzer / @holdenk Spark3 provides the ScanBuilder abstraction to define all types of Scans (Batch, MicroBatch & Continuous), but the current class modelling has SparkBatchScan as the only Scan implementation. Several of BatchScan's concerns, from maintaining the state of a single SnapshotId to read from, to asOfTimeStamp, to features like VectorizedReads, don't seem relevant to streaming scans. So I feel we need to split streaming scans out into a separate class. Does this thought process make sense? If we go this route, do you folks know how to pass different Scan objects to Spark for Batch vs. Streaming?
apache-iceberg
java
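For readers following the question in this record: the `if (false)` placeholder in the patch marks exactly what the reviewer is asking about. One way to make the dispatch concrete is sketched below, assuming a hypothetical `streaming` read option; note that Spark 3 actually signals streaming reads through `Scan#toMicroBatchStream`, so a real implementation may instead return a single Scan that supports both code paths.

```java
// Sketch only: replaces the `if (false)` placeholder with an explicit switch on
// a hypothetical "streaming" read option. Not how Iceberg ultimately solved this.
@Override
public Scan build() {
  boolean streaming = options.getBoolean("streaming", false); // hypothetical flag
  if (streaming) {
    return new SparkMicroBatchStreamScan(
        spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options);
  }
  return new SparkBatchQueryScan(
      spark, table, caseSensitive, schemaWithMetadataColumns(), filterExpressions, options);
}
```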
@@ -56,10 +56,7 @@ public final class ClaimTypeConverter implements Converter<Map<String, Object>, this.claimTypeConverters.forEach((claimName, typeConverter) -> { if (claims.containsKey(claimName)) { Object claim = claims.get(claimName); - Object mappedClaim = typeConverter.convert(claim); - if (mappedClaim != null) { - result.put(claimName, mappedClaim); - } + result.put(claimName, typeConverter.convert(claim)); } }); return result;
1
/* * Copyright 2002-2019 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.oauth2.core.converter; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; import org.springframework.core.convert.converter.Converter; import org.springframework.util.Assert; import org.springframework.util.CollectionUtils; /** * A {@link Converter} that provides type conversion for claim values. * * @author Joe Grandja * @since 5.2 * @see Converter */ public final class ClaimTypeConverter implements Converter<Map<String, Object>, Map<String, Object>> { private final Map<String, Converter<Object, ?>> claimTypeConverters; /** * Constructs a {@code ClaimTypeConverter} using the provided parameters. * @param claimTypeConverters a {@link Map} of {@link Converter}(s) keyed by claim * name */ public ClaimTypeConverter(Map<String, Converter<Object, ?>> claimTypeConverters) { Assert.notEmpty(claimTypeConverters, "claimTypeConverters cannot be empty"); Assert.noNullElements(claimTypeConverters.values().toArray(), "Converter(s) cannot be null"); this.claimTypeConverters = Collections.unmodifiableMap(new LinkedHashMap<>(claimTypeConverters)); } @Override public Map<String, Object> convert(Map<String, Object> claims) { if (CollectionUtils.isEmpty(claims)) { return claims; } Map<String, Object> result = new HashMap<>(claims); this.claimTypeConverters.forEach((claimName, typeConverter) -> { if (claims.containsKey(claimName)) { Object claim = claims.get(claimName); Object mappedClaim = typeConverter.convert(claim); if (mappedClaim != null) { result.put(claimName, mappedClaim); } } }); return result; } }
1
18,034
Will you please update these files to use a copyright end date of 2021?
spring-projects-spring-security
java
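An aside on the patch in this record, separate from the copyright request: removing the null check changes observable behavior, because a converter that returns null now overwrites the original claim value instead of leaving it intact. A minimal illustration (claim name and value are made up):

```java
// Illustrative only: the behavioral difference introduced by the patch above.
Map<String, Converter<Object, ?>> converters = new HashMap<>();
converters.put("exp", source -> null); // a converter whose conversion fails

ClaimTypeConverter claimTypeConverter = new ClaimTypeConverter(converters);

Map<String, Object> claims = new HashMap<>();
claims.put("exp", "not-a-date");
Map<String, Object> converted = claimTypeConverter.convert(claims);

// Before the patch: converted.get("exp") is "not-a-date" (original preserved).
// After the patch:  converted.get("exp") is null (overwritten by the converter).
```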
@@ -10,6 +10,17 @@ if (window.__AXE_EXTENSION__) { /*eslint indent: 0*/ var testUtils = {}; +var fixture = document.createElement('div'); +fixture.setAttribute('id', 'fixture'); +document.body.insertBefore(fixture, document.body.firstChild); + +/*eslint no-unused-vars: 0*/ +var checks, commons; +var originalChecks = (checks = axe._audit.checks); +var originalAudit = axe._audit; +var originalRules = axe._audit.rules; +var originalCommons = (commons = axe.commons); + /** * Create a check context for mocking/resetting data and relatedNodes in tests *
1
/* global axe, checks */ // Let the user know they need to disable their axe/attest extension before running the tests. if (window.__AXE_EXTENSION__) { throw new Error( 'You must disable your axe/attest browser extension in order to run the test suite.' ); } /*eslint indent: 0*/ var testUtils = {}; /** * Create a check context for mocking/resetting data and relatedNodes in tests * * @return Object */ testUtils.MockCheckContext = function() { 'use strict'; return { _relatedNodes: [], _data: null, // When using this.async() in a check, assign a function to _onAsync // to catch the response. _onAsync: null, async: function() { var self = this; return function(result) { // throws if _onAsync isn't set self._onAsync(result, self); }; }, data: function(d) { this._data = d; }, relatedNodes: function(nodes) { this._relatedNodes = Array.isArray(nodes) ? nodes : [nodes]; }, reset: function() { this._data = null; this._relatedNodes = []; this._onAsync = null; } }; }; /** * Provide an API for determining Shadow DOM v0 and v1 support in tests. * * @param HTMLDocumentElement The document of the current context * @return Object */ testUtils.shadowSupport = (function(document) { 'use strict'; var v0 = document.body && typeof document.body.createShadowRoot === 'function', v1 = document.body && typeof document.body.attachShadow === 'function'; return { v0: v0 === true, v1: v1 === true, undefined: document.body && typeof document.body.attachShadow === 'undefined' && typeof document.body.createShadowRoot === 'undefined' }; })(document); /** * Method for injecting content into a fixture and caching * the flattened DOM tree (light and Shadow DOM together) * * @param {String|Node} content Stuff to go into the fixture (html or DOM node) * @return HTMLElement */ testUtils.fixtureSetup = function(content) { 'use strict'; var fixture = document.querySelector('#fixture'); if (typeof content !== 'undefined') { fixture.innerHTML = ''; } if (typeof content === 'string') { fixture.innerHTML = content; } else if (content instanceof Node) { fixture.appendChild(content); } else if (Array.isArray(content)) { content.forEach(function(node) { fixture.appendChild(node); }); } axe._tree = axe.utils.getFlattenedTree(fixture); axe._selectorData = axe.utils.getSelectorData(axe._tree); return fixture; }; /** * Create check arguments * * @param Node|String Stuff to go into the fixture (html or node) * @param Object Options argument for the check (optional, default: {}) * @param String Target for the check, CSS selector (default: '#target') * @return Array */ testUtils.checkSetup = function(content, options, target) { 'use strict'; // Normalize the params if (typeof options !== 'object') { target = options; options = {}; } // Normalize target, allow it to be the inserted node or '#target' target = target || (content instanceof Node ? content : '#target'); testUtils.fixtureSetup(content); var node; if (typeof target === 'string') { node = axe.utils.querySelectorAll(axe._tree[0], target)[0]; } else if (target instanceof Node) { node = axe.utils.getNodeFromTree(target); } else { node = target; } return [node.actualNode, options, node]; }; /** * Create check arguments with Shadow DOM. Target can be inside or outside of Shadow DOM, queried by * adding `id="target"` to a fragment. Or specify a custom selector as the `targetSelector` argument. 
* * @param Node|String Stuff to go into the fixture (html string or DOM Node) * @param Node|String Stuff to go into the shadow boundary (html or node) * @param Object Options argument for the check (optional, default: {}) * @param String Target selector for the check, can be inside or outside of Shadow DOM (optional, default: '#target') * @return Array */ testUtils.shadowCheckSetup = function( content, shadowContent, options, targetSelector ) { 'use strict'; // Normalize target, allow it to be the provided string or use '#target' to query composed tree if (typeof targetSelector !== 'string') { targetSelector = '#target'; } // Normalize the object params if (typeof options !== 'object') { options = {}; } var fixture = testUtils.fixtureSetup(content); var targetCandidate = fixture.querySelector(targetSelector); var container = targetCandidate; if (!targetCandidate) { // check if content specifies a shadow container container = fixture.querySelector('#shadow'); if (!container) { container = fixture.firstChild; } } // attach a shadowRoot with the content provided var shadowRoot = container.attachShadow({ mode: 'open' }); if (typeof shadowContent === 'string') { shadowRoot.innerHTML = shadowContent; } else if (content instanceof Node) { shadowRoot.appendChild(shadowContent); } if (!targetCandidate) { targetCandidate = shadowRoot.querySelector(targetSelector); } if (!targetSelector && !targetCandidate) { throw 'shadowCheckSetup requires at least one fragment to have #target, or a provided targetSelector'; } // query the composed tree AFTER shadowDOM has been attached axe._tree = axe.utils.getFlattenedTree(fixture); var node = axe.utils.getNodeFromTree(targetCandidate); return [node.actualNode, options, node]; }; /** * Setup axe._tree flat tree * @param Node Stuff to go in the flat tree * @returns vNode[] */ testUtils.flatTreeSetup = function(content) { axe._tree = axe.utils.getFlattenedTree(content); return axe._tree; }; /** * Wait for all nested frames to be loaded * * @param Object Window to wait for (optional) * @param function Callback, called once resolved */ testUtils.awaitNestedLoad = function awaitNestedLoad(win, cb) { 'use strict'; if (typeof win === 'function') { cb = win; win = window; } var document = win.document; var q = axe.utils.queue(); // Wait for page load q.defer(function(resolve) { if (document.readyState === 'complete') { resolve(); } else { win.addEventListener('load', resolve); } }); // Wait for all frames to be loaded Array.from(document.querySelectorAll('iframe')).forEach(function(frame) { q.defer(function(resolve) { return awaitNestedLoad(frame.contentWindow, resolve); }); }); // Complete (don't pass the args on to the callback) q.then(function() { cb(); }); }; /** * Add a given stylesheet dynamically to the document * * @param {Object} data composite object containing properties to create stylesheet * @property {String} data.href relative or absolute url for stylesheet to be loaded * @property {Boolean} data.mediaPrint boolean to represent if the constructed sheet is for print media * @property {String} data.text text contents to be written to the stylesheet * @property {String} data.id id reference to link or style to be added to document * @param {Object} rootNode document/fragment to which to append style * @returns {Object} axe.utils.queue */ testUtils.addStyleSheet = function addStyleSheet(data, rootNode) { var doc = rootNode ? 
rootNode : document; var q = axe.utils.queue(); if (data.href) { q.defer(function(resolve, reject) { var link = doc.createElement('link'); link.rel = 'stylesheet'; link.href = data.href; if (data.id) { link.id = data.id; } if (data.mediaPrint) { link.media = 'print'; } link.onload = function() { setTimeout(function() { resolve(); }); }; link.onerror = function() { reject(); }; doc.head.appendChild(link); }); } else { q.defer(function(resolve) { var style = doc.createElement('style'); if (data.id) { style.id = data.id; } style.type = 'text/css'; style.appendChild(doc.createTextNode(data.text)); doc.head.appendChild(style); setTimeout(function() { resolve(); }, 100); // -> note: gives firefox to load (document.stylesheets), other browsers are fine. }); } return q; }; /** * Add a list of stylesheets * * @param {Object} sheets array of sheets data object * @returns {Object} axe.utils.queue */ testUtils.addStyleSheets = function addStyleSheets(sheets, rootNode) { var q = axe.utils.queue(); sheets.forEach(function(data) { q.defer(axe.testUtils.addStyleSheet(data, rootNode)); }); return q; }; /** * Remove a list of stylesheets from the document * @param {Array<Object>} sheets array of sheets data object * @returns {Object} axe.utils.queue */ testUtils.removeStyleSheets = function removeStyleSheets(sheets) { var q = axe.utils.queue(); sheets.forEach(function(data) { q.defer(function(resolve, reject) { var node = document.getElementById(data.id); if (!node || !node.parentNode) { reject(); } node.parentNode.removeChild(node); resolve(); }); }); return q; }; /** * Assert a given stylesheet against selectorText and cssText * * @param {Object} sheet CSS Stylesheet * @param {String} selectorText CSS Selector * @param {String} cssText CSS Values * @param {Boolean} includes (Optional) flag to check if existence of selectorText within cssText */ testUtils.assertStylesheet = function assertStylesheet( sheet, selectorText, cssText, includes ) { assert.isDefined(sheet); assert.property(sheet, 'cssRules'); if (includes) { assert.isTrue(cssText.includes(selectorText)); } else { assert.equal(sheet.cssRules[0].selectorText, selectorText); // compare the selector properties var styleEl = document.createElement('style'); styleEl.type = 'text/css'; styleEl.innerHTML = cssText; document.body.appendChild(styleEl); var testSheet = document.styleSheets[document.styleSheets.length - 1]; var sheetRule = sheet.cssRules[0]; var testRule = testSheet.cssRules[0]; try { for (var i = 0; i < testRule.style.length; i++) { var property = testRule.style[i]; assert.equal(sheetRule.style[property], testRule.style[property]); } } finally { styleEl.parentNode.removeChild(styleEl); } } }; /* * Injecting content into a fixture and return queried element within fixture * * @param {String|Node} content to go into the fixture (html or DOM node) * @return HTMLElement */ testUtils.queryFixture = function queryFixture(html, query) { testUtils.fixtureSetup(html); return axe.utils.querySelectorAll(axe._tree, query || '#target')[0]; }; /** * Return the checks evaluate method and apply default options * @param {String} checkId - ID of the check * @return Function */ testUtils.getCheckEvaluate = function getCheckEvaluate(checkId) { var check = checks[checkId]; return function evaluateWrapper(node, options, virtualNode, context) { var opts = check.getOptions(options); return check.evaluate.call(this, node, opts, virtualNode, context); }; }; /** * Test function for detecting IE11 user agent string * * @param {Object} navigator The navigator object of 
the current browser * @return {boolean} */ testUtils.isIE11 = (function isIE11(navigator) { return navigator.userAgent.indexOf('Trident/7') !== -1; })(navigator); axe.testUtils = testUtils; afterEach(function() { axe._cache.clear(); });
1
16,082
Adding the fixture to the body, as we don't have the `runner.tmpl` anymore.
dequelabs-axe-core
js
@@ -6,11 +6,15 @@ import ( log "github.com/sirupsen/logrus" + librarygocontroller "github.com/openshift/library-go/pkg/controller" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller"
1
package fakeclusterinstall import ( "context" "time" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" hivev1 "github.com/openshift/hive/apis/hive/v1" hiveint "github.com/openshift/hive/apis/hiveinternal/v1alpha1" hivemetrics "github.com/openshift/hive/pkg/controller/metrics" controllerutils "github.com/openshift/hive/pkg/controller/utils" ) const ( ControllerName = hivev1.FakeClusterInstallControllerName ) // Add creates a new FakeClusterInstall controller and adds it to the manager with default RBAC. func Add(mgr manager.Manager) error { logger := log.WithField("controller", ControllerName) concurrentReconciles, clientRateLimiter, queueRateLimiter, err := controllerutils.GetControllerConfig(mgr.GetClient(), ControllerName) if err != nil { logger.WithError(err).Error("could not get controller configurations") return err } return AddToManager(mgr, NewReconciler(mgr, clientRateLimiter), concurrentReconciles, queueRateLimiter) } // NewReconciler returns a new reconcile.Reconciler func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler { r := &ReconcileClusterInstall{ Client: controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter), scheme: mgr.GetScheme(), logger: log.WithField("controller", ControllerName), } return r } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error { c, err := controller.New("fakeclusterinstall-controller", mgr, controller.Options{ Reconciler: r, MaxConcurrentReconciles: concurrentReconciles, RateLimiter: rateLimiter, }) if err != nil { log.WithField("controller", ControllerName).WithError(err).Error("Error creating new fakeclusterinstall controller") return err } // Watch for changes to FakeClusterInstall err = c.Watch(&source.Kind{Type: &hiveint.FakeClusterInstall{}}, &handler.EnqueueRequestForObject{}) if err != nil { log.WithField("controller", ControllerName).WithError(err).Error("Error watching FakeClusterInstall") return err } // TODO: also watch for changes to ClusterDeployment? Agent installs try to respond to changes there as well. return nil } // ReconcileClusterInstall is the reconciler for FakeClusterInstall. 
type ReconcileClusterInstall struct { client.Client scheme *runtime.Scheme logger log.FieldLogger } // Reconcile ensures that a given FakeClusterInstall resource exists and reflects the state of cluster operators from its target cluster func (r *ReconcileClusterInstall) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "fakeClusterInstall", request.NamespacedName) logger.Info("reconciling FakeClusterInstall") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) defer recobsrv.ObserveControllerReconcileTime() // Fetch the FakeClusterInstall instance fci := &hiveint.FakeClusterInstall{} err := r.Get(context.TODO(), request.NamespacedName, fci) if err != nil { if apierrors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. // For additional cleanup logic use finalizers. logger.Debug("FakeClusterInstall not found") return reconcile.Result{}, nil } // Error reading the object - requeue the request. logger.WithError(err).Error("Error getting FakeClusterInstall") return reconcile.Result{}, err } if !fci.DeletionTimestamp.IsZero() { logger.Info("FakeClusterInstall resource has been deleted") return reconcile.Result{}, nil } // Ensure our conditions are present, default state should be Unknown per Kube guidelines: conditionTypes := []string{ // These conditions are required by Hive: hivev1.ClusterInstallCompleted, hivev1.ClusterInstallFailed, hivev1.ClusterInstallStopped, hivev1.ClusterInstallRequirementsMet, } var anyChanged bool for _, condType := range conditionTypes { c := controllerutils.FindClusterInstallCondition(fci.Status.Conditions, condType) if c == nil { logger.WithField("condition", condType).Info("initializing condition with Unknown status") newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, condType, corev1.ConditionUnknown, "", "", controllerutils.UpdateConditionAlways) fci.Status.Conditions = newConditions anyChanged = anyChanged || changed } } if anyChanged { err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } // Check if we're Completed and can exit reconcile early. completedCond := controllerutils.FindClusterInstallCondition(fci.Status.Conditions, hivev1.ClusterInstallCompleted) if completedCond.Status == corev1.ConditionTrue { // Ensure Stopped=True newConditions, changedStopped := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallStopped, corev1.ConditionTrue, "ClusterInstalled", "Cluster install completed successfully", controllerutils.UpdateConditionIfReasonOrMessageChange) // Ensure Failed=False newConditions, changedFailed := controllerutils.SetClusterInstallConditionWithChangeCheck( newConditions, hivev1.ClusterInstallFailed, corev1.ConditionFalse, "ClusterInstalled", "Cluster install completed successfully", controllerutils.UpdateConditionIfReasonOrMessageChange) if changedStopped || changedFailed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } logger.Info("cluster install completed, no work left to be done") return reconcile.Result{}, err } // NOTE: While this controller does not support a Stopped=True Completed=False state (it will try // forever), most real implementations would want to check if it's time to give up here. 
// Ensure Stopped=False as we are actively working to reconcile: newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallStopped, corev1.ConditionFalse, "InProgress", "Cluster install in progress", controllerutils.UpdateConditionIfReasonOrMessageChange) if changed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } // Fetch corresponding ClusterDeployment instance cd := &hivev1.ClusterDeployment{} switch err = r.Get(context.TODO(), request.NamespacedName, cd); { case apierrors.IsNotFound(err): // TODO: assuming same name, add explicit reference of some kind between cluster install and cluster deplopyment logger.WithField("clusterDeployment", request.NamespacedName).Info("ClusterDeployment not found") return reconcile.Result{}, nil case err != nil: logger.WithError(err).Error("Error getting ClusterDeployment") return reconcile.Result{}, err } if !cd.DeletionTimestamp.IsZero() { logger.Debug("ClusterDeployment has been deleted") return reconcile.Result{}, nil } // Simulate 30 second wait for RequirementsMet condition to go True: reqsCond := controllerutils.FindClusterInstallCondition(fci.Status.Conditions, hivev1.ClusterInstallRequirementsMet) switch reqsCond.Status { case corev1.ConditionUnknown: logger.Info("setting RequirementsMet condition to False") newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallRequirementsMet, corev1.ConditionFalse, "WaitingForRequirements", "Waiting 30 seconds before considering requirements met", controllerutils.UpdateConditionIfReasonOrMessageChange) if changed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } case corev1.ConditionFalse: // Check if it's been 30 seconds since we set condition to False: delta := time.Now().Sub(reqsCond.LastTransitionTime.Time) if delta < 30*time.Second { // requeue for remainder of delta return reconcile.Result{RequeueAfter: 30*time.Second - delta}, nil } logger.Info("setting RequirementsMet condition to True") newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallRequirementsMet, corev1.ConditionTrue, "AllRequirementsMet", "All requirements met", controllerutils.UpdateConditionIfReasonOrMessageChange) if changed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } } // Simulate 30 second wait for Completed condition to go True: switch completedCond.Status { case corev1.ConditionUnknown: logger.Info("setting Completed condition to False") newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallCompleted, corev1.ConditionFalse, "InProgress", "Installation in progress", controllerutils.UpdateConditionIfReasonOrMessageChange) if changed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } case corev1.ConditionFalse: // Set ClusterMetadata if install is underway: if fci.Spec.ClusterMetadata == nil { fci.Spec.ClusterMetadata = &hivev1.ClusterMetadata{ ClusterID: "not-a-real-cluster", InfraID: "not-a-real-cluster", // TODO: do we need to create dummy secrets? 
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: "admin-kubeconfig"}, AdminPasswordSecretRef: corev1.LocalObjectReference{Name: "admin-password"}, } logger.Info("setting fake ClusterMetadata") return reconcile.Result{}, r.Client.Update(context.Background(), fci) } // Check if it's been 30 seconds since we set condition to False: delta := time.Now().Sub(completedCond.LastTransitionTime.Time) if delta < 30*time.Second { // requeue for remainder of delta return reconcile.Result{RequeueAfter: 30*time.Second - delta}, nil } logger.Info("setting Completed condition to True") newConditions, changed := controllerutils.SetClusterInstallConditionWithChangeCheck( fci.Status.Conditions, hivev1.ClusterInstallCompleted, corev1.ConditionTrue, "ClusterInstalled", "Cluster install completed successfully", controllerutils.UpdateConditionIfReasonOrMessageChange) if changed { fci.Status.Conditions = newConditions err := updateClusterInstallStatus(r.Client, fci, logger) return reconcile.Result{}, err } } logger.Info("cluster is already installed") return reconcile.Result{}, nil } func updateClusterInstallStatus(c client.Client, fci *hiveint.FakeClusterInstall, logger log.FieldLogger) error { // TODO: deepequals check logger.Info("updating status") return c.Status().Update(context.Background(), fci) }
1
17,757
nit: just for consistency we can move this import block down, right above the openshift/hive imports.
openshift-hive
go
@@ -57,7 +57,7 @@ class MessageLocationTuple(NamedTuple): class ManagedMessage(NamedTuple): - """Tuple with information ahout a managed message of the linter""" + """Tuple with information about a managed message of the linter""" name: Optional[str] msgid: str
1
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """A collection of typing utilities.""" import sys from typing import NamedTuple, Optional, Union if sys.version_info >= (3, 8): from typing import Literal, TypedDict else: from typing_extensions import Literal, TypedDict class FileItem(NamedTuple): """Represents data about a file handled by pylint Each file item has: - name: full name of the module - filepath: path of the file - modname: module name """ name: str filepath: str modpath: str class ModuleDescriptionDict(TypedDict): """Represents data about a checked module""" path: str name: str isarg: bool basepath: str basename: str class ErrorDescriptionDict(TypedDict): """Represents data about errors collected during checking of a module""" key: Literal["fatal"] mod: str ex: Union[ImportError, SyntaxError] class MessageLocationTuple(NamedTuple): """Tuple with information about the location of a to-be-displayed message""" abspath: str path: str module: str obj: str line: int column: int end_line: Optional[int] = None end_column: Optional[int] = None class ManagedMessage(NamedTuple): """Tuple with information ahout a managed message of the linter""" name: Optional[str] msgid: str symbol: str line: Optional[int] is_disabled: bool
1
19,806
Perhaps clean up the commit history and don't squash?
PyCQA-pylint
py
@@ -162,7 +162,7 @@ final class ScribeInboundHandler extends ChannelInboundHandlerAdapter { buf.release(); } } else { - returned.writeBytes(content.array(), content.offset(), content.length()); + returned.writeBytes(content.array(), 0, content.length()); } if (responseIndex == previouslySentResponseIndex + 1) {
1
/* * Copyright 2015-2019 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package zipkin2.collector.scribe; import com.linecorp.armeria.common.HttpData; import com.linecorp.armeria.common.HttpHeaderNames; import com.linecorp.armeria.common.HttpMethod; import com.linecorp.armeria.common.HttpRequest; import com.linecorp.armeria.common.HttpResponse; import com.linecorp.armeria.common.RequestHeaders; import com.linecorp.armeria.common.util.Exceptions; import com.linecorp.armeria.common.util.SafeCloseable; import com.linecorp.armeria.server.ServiceRequestContext; import com.linecorp.armeria.server.ServiceRequestContextBuilder; import com.linecorp.armeria.server.thrift.THttpService; import com.linecorp.armeria.unsafe.ByteBufHttpData; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufHolder; import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.EventLoop; import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static zipkin2.Call.propagateIfFatal; final class ScribeInboundHandler extends ChannelInboundHandlerAdapter { static final Logger logger = LoggerFactory.getLogger(ScribeInboundHandler.class); // Headers mostly copied from https://github.com/apache/thrift/blob/master/lib/javame/src/org/apache/thrift/transport/THttpClient.java#L130 static final RequestHeaders THRIFT_HEADERS = RequestHeaders.builder( HttpMethod.POST, "/internal/zipkin-thriftrpc") .set(HttpHeaderNames.CONTENT_TYPE, "application/x-thrift") .set(HttpHeaderNames.ACCEPT, "application/x-thrift") .set(HttpHeaderNames.USER_AGENT, "Zipkin/ScribeInboundHandler") .build(); final THttpService scribeService; ScribeInboundHandler(ScribeSpanConsumer scribe) { scribeService = THttpService.of(scribe); } enum ReadState { HEADER, PAYLOAD } CompositeByteBuf pending; ReadState state; int nextFrameSize; Map<Integer, ByteBuf> pendingResponses = new HashMap<>(); int nextResponseIndex = 0; int previouslySentResponseIndex = -1; @Override public void channelActive(ChannelHandlerContext ctx) { pending = ctx.alloc().compositeBuffer(); state = ReadState.HEADER; } @Override public void channelRead(final ChannelHandlerContext ctx, Object msg) { if (pending == null) return; // Already closed (probably due to an exception). 
assert msg instanceof ByteBuf; ByteBuf buf = (ByteBuf) msg; pending.addComponent(true, buf); switch (state) { case HEADER: maybeReadHeader(ctx); break; case PAYLOAD: maybeReadPayload(ctx); break; } } @Override public void channelInactive(ChannelHandlerContext ctx) { release(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { Exceptions.logIfUnexpected(logger, ctx.channel(), cause); release(); closeOnFlush(ctx.channel()); } void maybeReadHeader(ChannelHandlerContext ctx) { if (pending.readableBytes() < 4) return; nextFrameSize = pending.readInt(); state = ReadState.PAYLOAD; maybeReadPayload(ctx); } void maybeReadPayload(ChannelHandlerContext ctx) { if (pending.readableBytes() < nextFrameSize) return; ByteBuf payload = ctx.alloc().buffer(nextFrameSize); pending.readBytes(payload, nextFrameSize); pending.discardSomeReadBytes(); state = ReadState.HEADER; HttpRequest request = HttpRequest.of(THRIFT_HEADERS, new ByteBufHttpData(payload, true)); ServiceRequestContextBuilder requestContextBuilder = ServiceRequestContextBuilder.of(request) .service(scribeService) .alloc(ctx.alloc()); if (ctx.executor() instanceof EventLoop) { requestContextBuilder.eventLoop((EventLoop) ctx.executor()); } ServiceRequestContext requestContext = requestContextBuilder.build(); final HttpResponse response; try (SafeCloseable unused = requestContext.push()) { response = scribeService.serve(requestContext, request); } catch (Throwable t) { propagateIfFatal(t); exceptionCaught(ctx, t); return; } int responseIndex = nextResponseIndex++; response.aggregateWithPooledObjects(ctx.executor(), ctx.alloc()).handle((msg, t) -> { if (t != null) { exceptionCaught(ctx, t); return null; } HttpData content = msg.content(); ByteBuf returned = ctx.alloc().buffer(content.length() + 4); returned.writeInt(content.length()); if (content instanceof ByteBufHolder) { ByteBuf buf = ((ByteBufHolder) content).content(); try { returned.writeBytes(buf); } finally { buf.release(); } } else { returned.writeBytes(content.array(), content.offset(), content.length()); } if (responseIndex == previouslySentResponseIndex + 1) { ctx.writeAndFlush(returned); previouslySentResponseIndex++; flushResponses(ctx); } else { pendingResponses.put(responseIndex, returned); } return null; }); } void flushResponses(ChannelHandlerContext ctx) { while (!pendingResponses.isEmpty()) { ByteBuf response = pendingResponses.remove(previouslySentResponseIndex + 1); if (response == null) { return; } ctx.writeAndFlush(response); previouslySentResponseIndex++; } } void release() { if (pending != null) { pending.release(); pending = null; } pendingResponses.values().forEach(ByteBuf::release); pendingResponses.clear(); } /** * Closes the specified channel after all queued write requests are flushed. */ static void closeOnFlush(Channel ch) { if (ch.isActive()) { ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); } } }
1
15,008
Just `writeBytes(content.array())`. Back to my original, somewhat broken version ;)
openzipkin-zipkin
java
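On the `writeBytes` suggestion in this record: the forms in play differ only in how much of `HttpData#array()` they copy. The reviewer's short form is safe exactly when `array()` returns the payload and nothing more; the explicit-range form guards against a larger backing array. A hedged sketch, reusing the `content` and `returned` variables from the handler in this record:

```java
// Illustrative only: when is the short form equivalent to the explicit range?
byte[] raw = content.array();
if (raw.length == content.length()) {
  // array() holds exactly the payload, so copying the whole array is fine:
  returned.writeBytes(raw);
} else {
  // array() is a larger backing buffer; copy only the payload range:
  returned.writeBytes(raw, 0, content.length());
}
```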
@@ -95,6 +95,16 @@ func TestCrossdock(t *testing.T) { "transport": []string{"http", "tchannel"}, }, }, + { + name: "ctxpropagation", + axes: axes{ + "transport": []string{"http", "tchannel"}, + }, + params: params{ + "ctxserver": "127.0.0.1", + "ctxclient": "127.0.0.1", + }, + }, } for _, bb := range behaviors {
1
// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package main import ( "net/url" "testing" "github.com/yarpc/yarpc-go/crossdock-go" "github.com/yarpc/yarpc-go/crossdock/client" "github.com/yarpc/yarpc-go/crossdock/server" ) const clientURL = "http://127.0.0.1:8080" func TestCrossdock(t *testing.T) { server.Start() defer server.Stop() go client.Start() crossdock.Wait(t, clientURL, 10) type params map[string]string type axes map[string][]string defaultParams := params{"server": "127.0.0.1"} behaviors := []struct { name string params params axes axes }{ { name: "raw", axes: axes{"transport": []string{"http", "tchannel"}}, }, { name: "json", axes: axes{"transport": []string{"http", "tchannel"}}, }, { name: "thrift", axes: axes{"transport": []string{"http", "tchannel"}}, }, { name: "errors", }, { name: "headers", axes: axes{ "transport": []string{"http", "tchannel"}, "encoding": []string{"raw", "json", "thrift"}, }, }, { name: "tchclient", axes: axes{ "encoding": []string{"raw", "json", "thrift"}, }, }, { name: "tchserver", axes: axes{ "encoding": []string{"raw", "json", "thrift"}, }, }, { name: "thriftgauntlet", axes: axes{ "transport": []string{"http", "tchannel"}, }, }, { name: "outboundttl", axes: axes{ "transport": []string{"http", "tchannel"}, }, }, } for _, bb := range behaviors { args := url.Values{} for k, v := range defaultParams { args.Set(k, v) } if len(bb.axes) == 0 { crossdock.Call(t, clientURL, bb.name, args) continue } for _, entry := range crossdock.Combinations(bb.axes) { entryArgs := url.Values{} for k := range args { entryArgs.Set(k, args.Get(k)) } for k, v := range entry { entryArgs.Set(k, v) } for k, v := range bb.params { entryArgs.Set(k, v) } crossdock.Call(t, clientURL, bb.name, entryArgs) } } }
1
9,977
I couldn't tell from reading the test: do we ever criss-cross transports in the same test? I want to make sure we aren't breaking the chain...
yarpc-yarpc-go
go